/*
 * Copyright (c) 2015-2019 The Khronos Group Inc.
 * Copyright (c) 2015-2019 Valve Corporation
 * Copyright (c) 2015-2019 LunarG, Inc.
 * Copyright (c) 2015-2019 Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Author: Chia-I Wu
 * Author: Chris Forbes
 * Author: Courtney Goeltzenleuchter
 * Author: Mark Lobodzinski
 * Author: Mike Stroyan
 * Author: Tobin Ehlis
 * Author: Tony Barbour
 * Author: Cody Northrop
 * Author: Dave Houlton
 * Author: Jeremy Kniager
 * Author: Shannon McPherson
 */

#ifdef ANDROID
#include "vulkan_wrapper.h"
#else
#define NOMINMAX
#include <vulkan/vulkan.h>
#endif

#include "layers/vk_device_profile_api_layer.h"

#if defined(ANDROID) && defined(VALIDATION_APK)
#include <android/log.h>
#include <android_native_app_glue.h>
#endif

#include "icd-spv.h"
#include "test_common.h"
#include "vk_layer_config.h"
#include "vk_format_utils.h"
#include "vkrenderframework.h"
#include "vk_typemap_helper.h"
#include "convert_to_renderpass2.h"

#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
#include <memory>
#include <unordered_set>

//--------------------------------------------------------------------------------------
// Mesh and VertexFormat Data
//--------------------------------------------------------------------------------------

const char *kSkipPrefix = "             TEST SKIPPED:";

enum BsoFailSelect {
    BsoFailNone,
    BsoFailLineWidth,
    BsoFailDepthBias,
    BsoFailViewport,
    BsoFailScissor,
    BsoFailBlend,
    BsoFailDepthBounds,
    BsoFailStencilReadMask,
    BsoFailStencilWriteMask,
    BsoFailStencilReference,
    BsoFailCmdClearAttachments,
    BsoFailIndexBuffer,
    BsoFailIndexBufferBadSize,
    BsoFailIndexBufferBadOffset,
    BsoFailIndexBufferBadMapSize,
    BsoFailIndexBufferBadMapOffset
};

static const char bindStateVertShaderText[] =
    "#version 450\n"
    "vec2 vertices[3];\n"
    "void main() {\n"
    "   vertices[0] = vec2(-1.0, -1.0);\n"
    "   vertices[1] = vec2( 1.0, -1.0);\n"
    "   vertices[2] = vec2( 0.0,  1.0);\n"
    "   gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n"
    "}\n";

static const char bindStateFragShaderText[] =
    "#version 450\n"
    "\n"
    "layout(location = 0) out vec4 uFragColor;\n"
    "void main(){\n"
    "   uFragColor = vec4(0,1,0,1);\n"
    "}\n";

// Static arrays helper
template <typename ElementT, size_t array_size>
size_t size(ElementT (&)[array_size]) {
    return array_size;
}

// Format search helper
VkFormat FindSupportedDepthStencilFormat(VkPhysicalDevice phy) {
    VkFormat ds_formats[] = {VK_FORMAT_D16_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT};
    // Iterate over the element count, not the byte count, of the candidate list
    for (uint32_t i = 0; i < size(ds_formats); i++) {
        VkFormatProperties format_props;
        vkGetPhysicalDeviceFormatProperties(phy, ds_formats[i], &format_props);

        if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
            return ds_formats[i];
        }
    }
    return VK_FORMAT_UNDEFINED;
}

// Returns true if *any* requested features are available.
// Assumption is that the framework can successfully create an image as
// long as at least one of the feature bits is present (excepting VTX_BUF).
bool ImageFormatIsSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL,
                            VkFormatFeatureFlags features = ~VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) {
    VkFormatProperties format_props;
    vkGetPhysicalDeviceFormatProperties(phy, format, &format_props);
    VkFormatFeatureFlags phy_features =
        (VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures);
    return (0 != (phy_features & features));
}
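// Illustrative sketch only (the function name is hypothetical and not used by any test below):
// a typical way the two helpers above combine -- pick a supported depth/stencil format, then
// confirm optimal-tiling depth/stencil attachment support before creating an image with it.
inline bool ExampleDepthStencilFormatUsable(VkPhysicalDevice phy) {
    const VkFormat ds_format = FindSupportedDepthStencilFormat(phy);
    if (VK_FORMAT_UNDEFINED == ds_format) return false;  // no candidate format is supported
    return ImageFormatIsSupported(phy, ds_format, VK_IMAGE_TILING_OPTIMAL,
                                  VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT);
}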
// Returns true if format and *all* requested features are available.
bool ImageFormatAndFeaturesSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling, VkFormatFeatureFlags features) {
    VkFormatProperties format_props;
    vkGetPhysicalDeviceFormatProperties(phy, format, &format_props);
    VkFormatFeatureFlags phy_features =
        (VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures);
    return (features == (phy_features & features));
}

// Returns true if format and *all* requested features are available.
bool ImageFormatAndFeaturesSupported(const VkInstance inst, const VkPhysicalDevice phy, const VkImageCreateInfo info,
                                     const VkFormatFeatureFlags features) {
    // Verify physical device support of format features
    if (!ImageFormatAndFeaturesSupported(phy, info.format, info.tiling, features)) {
        return false;
    }

    // Verify that PhysDevImageFormatProp() also claims support for the specific usage
    VkImageFormatProperties props;
    VkResult err =
        vkGetPhysicalDeviceImageFormatProperties(phy, info.format, info.imageType, info.tiling, info.usage, info.flags, &props);
    if (VK_SUCCESS != err) {
        return false;
    }

#if 0
    // Convinced this chunk doesn't currently add any additional info, but leaving in place because it may be
    // necessary with future extensions

    // Verify again using version 2, if supported, which *can* return more property data than the original...
    // (It's not clear that this is any more definitive than using the original version - but no harm)
    PFN_vkGetPhysicalDeviceImageFormatProperties2KHR p_GetPDIFP2KHR =
        (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)vkGetInstanceProcAddr(inst,
                                                                                "vkGetPhysicalDeviceImageFormatProperties2KHR");
    if (NULL != p_GetPDIFP2KHR) {
        VkPhysicalDeviceImageFormatInfo2KHR fmt_info{};
        fmt_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
        fmt_info.pNext = nullptr;
        fmt_info.format = info.format;
        fmt_info.type = info.imageType;
        fmt_info.tiling = info.tiling;
        fmt_info.usage = info.usage;
        fmt_info.flags = info.flags;

        VkImageFormatProperties2KHR fmt_props = {};
        fmt_props.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
        err = p_GetPDIFP2KHR(phy, &fmt_info, &fmt_props);
        if (VK_SUCCESS != err) {
            return false;
        }
    }
#endif

    return true;
}

// Validation report callback prototype
static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject,
                                                size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg,
                                                void *pUserData);

// Simple sane SamplerCreateInfo boilerplate
static VkSamplerCreateInfo SafeSaneSamplerCreateInfo() {
    VkSamplerCreateInfo sampler_create_info = {};
    sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    sampler_create_info.pNext = nullptr;
    sampler_create_info.magFilter = VK_FILTER_NEAREST;
    sampler_create_info.minFilter = VK_FILTER_NEAREST;
    sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
    sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    sampler_create_info.mipLodBias = 0.0;
    sampler_create_info.anisotropyEnable = VK_FALSE;
    sampler_create_info.maxAnisotropy = 1.0;
    sampler_create_info.compareEnable = VK_FALSE;
    sampler_create_info.compareOp = VK_COMPARE_OP_NEVER;
    sampler_create_info.minLod = 0.0;
    sampler_create_info.maxLod = 16.0;
    sampler_create_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
    sampler_create_info.unnormalizedCoordinates = VK_FALSE;

    return sampler_create_info;
}
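// Illustrative sketch only (hypothetical helper, not referenced by the tests below): the
// boilerplate above is normally handed straight to vkCreateSampler, with a test tweaking a
// single field first when it wants to provoke (or avoid) a validation error.
inline VkResult ExampleCreateDefaultSampler(VkDevice device, VkSampler *out_sampler) {
    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    // A test would typically modify one field of sampler_ci here before the call.
    return vkCreateSampler(device, &sampler_ci, nullptr, out_sampler);
}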
// Helper for checking createRenderPass2 support and adding related extensions.
static bool CheckCreateRenderPass2Support(VkRenderFramework *renderFramework, std::vector<const char *> &device_extension_names) {
    if (renderFramework->DeviceExtensionSupported(renderFramework->gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME)) {
        device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
        device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        return true;
    }
    return false;
}

// Dependent "false" type for the static assert, as GCC will evaluate
// non-dependent static_asserts even for non-instantiated templates
template <typename T>
struct AlwaysFalse : std::false_type {};

// Helpers to get nearest greater or smaller value (of float) -- useful for testing the boundary cases of Vulkan limits
template <typename T>
T NearestGreater(const T from) {
    using Lim = std::numeric_limits<T>;
    const auto positive_direction = Lim::has_infinity ? Lim::infinity() : Lim::max();

    return std::nextafter(from, positive_direction);
}

template <typename T>
T NearestSmaller(const T from) {
    using Lim = std::numeric_limits<T>;
    const auto negative_direction = Lim::has_infinity ? -Lim::infinity() : Lim::lowest();

    return std::nextafter(from, negative_direction);
}

// ErrorMonitor Usage:
//
// Call SetDesiredFailureMsg with a string to be compared against all
// encountered log messages, or a validation error enum identifying
// desired error message. Passing NULL or VALIDATION_ERROR_MAX_ENUM
// will match all log messages. logMsg will return true for skipCall
// only if msg is matched or NULL.
//
// Call VerifyFound to determine if all desired failure messages
// were encountered. Call VerifyNotFound to determine if any unexpected
// failure was encountered.
class ErrorMonitor {
   public:
    ErrorMonitor() {
        test_platform_thread_create_mutex(&mutex_);
        test_platform_thread_lock_mutex(&mutex_);
        Reset();
        test_platform_thread_unlock_mutex(&mutex_);
    }

    ~ErrorMonitor() { test_platform_thread_delete_mutex(&mutex_); }

    // Set monitor to pristine state
    void Reset() {
        message_flags_ = VK_DEBUG_REPORT_ERROR_BIT_EXT;
        bailout_ = NULL;
        message_found_ = VK_FALSE;
        failure_message_strings_.clear();
        desired_message_strings_.clear();
        ignore_message_strings_.clear();
        other_messages_.clear();
    }

    // ErrorMonitor will look for an error message containing the specified string(s)
    void SetDesiredFailureMsg(const VkFlags msgFlags, const std::string msg) { SetDesiredFailureMsg(msgFlags, msg.c_str()); }
    void SetDesiredFailureMsg(const VkFlags msgFlags, const char *const msgString) {
        test_platform_thread_lock_mutex(&mutex_);
        desired_message_strings_.insert(msgString);
        message_flags_ |= msgFlags;
        test_platform_thread_unlock_mutex(&mutex_);
    }

    // ErrorMonitor will look for an error message containing the specified string(s)
    template <typename Iter>
    void SetDesiredFailureMsg(const VkFlags msgFlags, Iter iter, const Iter end) {
        for (; iter != end; ++iter) {
            SetDesiredFailureMsg(msgFlags, *iter);
        }
    }
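    // Typical negative-test flow (illustrative only; see TEST_F(VkLayerTest, RequiredParameter)
    // below for real usage):
    //
    //     m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "expected substring");
    //     ... make the offending Vulkan call ...
    //     m_errorMonitor->VerifyFound();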
    // Set an error that the error monitor will ignore. Do not use this function if you are creating a new test.
    // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this
    // function and its definition.
    void SetUnexpectedError(const char *const msg) {
        test_platform_thread_lock_mutex(&mutex_);

        ignore_message_strings_.emplace_back(msg);

        test_platform_thread_unlock_mutex(&mutex_);
    }

    VkBool32 CheckForDesiredMsg(const char *const msgString) {
        VkBool32 result = VK_FALSE;
        test_platform_thread_lock_mutex(&mutex_);
        if (bailout_ != nullptr) {
            *bailout_ = true;
        }
        string errorString(msgString);
        bool found_expected = false;

        if (!IgnoreMessage(errorString)) {
            for (auto desired_msg_it = desired_message_strings_.begin(); desired_msg_it != desired_message_strings_.end();
                 ++desired_msg_it) {
                if ((*desired_msg_it).length() == 0) {
                    // An empty desired_msg string "" indicates a positive test - not expecting an error.
                    // Return true to avoid calling layers/driver with this error.
                    // And don't erase the "" string, so it remains if another error is found.
                    result = VK_TRUE;
                    found_expected = true;
                    message_found_ = true;
                    failure_message_strings_.insert(errorString);
                } else if (errorString.find(*desired_msg_it) != string::npos) {
                    found_expected = true;
                    failure_message_strings_.insert(errorString);
                    message_found_ = true;
                    result = VK_TRUE;
                    // Remove a maximum of one failure message from the set
                    // Multiset mutation is acceptable because `break` causes flow of control to exit the for loop
                    desired_message_strings_.erase(desired_msg_it);
                    break;
                }
            }

            if (!found_expected) {
                printf("Unexpected: %s\n", msgString);
                other_messages_.push_back(errorString);
            }
        }

        test_platform_thread_unlock_mutex(&mutex_);
        return result;
    }

    vector<string> GetOtherFailureMsgs() const { return other_messages_; }

    VkDebugReportFlagsEXT GetMessageFlags() const { return message_flags_; }

    bool AnyDesiredMsgFound() const { return message_found_; }

    bool AllDesiredMsgsFound() const { return desired_message_strings_.empty(); }

    void SetError(const char *const errorString) {
        message_found_ = true;
        failure_message_strings_.insert(errorString);
    }

    void SetBailout(bool *bailout) { bailout_ = bailout; }

    void DumpFailureMsgs() const {
        vector<string> otherMsgs = GetOtherFailureMsgs();
        if (otherMsgs.size()) {
            cout << "Other error messages logged for this test were:" << endl;
            for (auto iter = otherMsgs.begin(); iter != otherMsgs.end(); iter++) {
                cout << "     " << *iter << endl;
            }
        }
    }

    // Helpers

    // ExpectSuccess now takes an optional argument allowing a custom combination of debug flags
    void ExpectSuccess(VkDebugReportFlagsEXT const message_flag_mask = VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // Match ANY message matching specified type
        SetDesiredFailureMsg(message_flag_mask, "");
        message_flags_ = message_flag_mask;  // override mask handling in SetDesired...
    }

    void VerifyFound() {
        // Not receiving expected message(s) is a failure. /Before/ throwing, dump any other messages
        if (!AllDesiredMsgsFound()) {
            DumpFailureMsgs();
            for (const auto desired_msg : desired_message_strings_) {
                ADD_FAILURE() << "Did not receive expected error '" << desired_msg << "'";
            }
        }
        Reset();
    }

    void VerifyNotFound() {
        // ExpectSuccess() configured us to match anything. Any error is a failure.
        if (AnyDesiredMsgFound()) {
            DumpFailureMsgs();
            for (const auto msg : failure_message_strings_) {
                ADD_FAILURE() << "Expected to succeed but got error: " << msg;
            }
        }
        Reset();
    }
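    // Typical positive-test flow (illustrative only): bracket calls that are expected to be
    // clean between ExpectSuccess() and VerifyNotFound(), e.g.
    //
    //     m_errorMonitor->ExpectSuccess();
    //     ... make Vulkan calls that should not trigger validation errors ...
    //     m_errorMonitor->VerifyNotFound();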
   private:
    // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this
    // function and its definition.
    bool IgnoreMessage(std::string const &msg) const {
        if (ignore_message_strings_.empty()) {
            return false;
        }

        return std::find_if(ignore_message_strings_.begin(), ignore_message_strings_.end(), [&msg](std::string const &str) {
                   return msg.find(str) != std::string::npos;
               }) != ignore_message_strings_.end();
    }

    VkFlags message_flags_;
    std::unordered_multiset<std::string> desired_message_strings_;
    std::unordered_multiset<std::string> failure_message_strings_;
    std::vector<std::string> ignore_message_strings_;
    vector<string> other_messages_;
    test_platform_thread_mutex mutex_;
    bool *bailout_;
    bool message_found_;
};

static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject,
                                                size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg,
                                                void *pUserData) {
    ErrorMonitor *errMonitor = (ErrorMonitor *)pUserData;
    if (msgFlags & errMonitor->GetMessageFlags()) {
        return errMonitor->CheckForDesiredMsg(pMsg);
    }
    return VK_FALSE;
}

class VkLayerTest : public VkRenderFramework {
   public:
    void VKTriangleTest(BsoFailSelect failCase);
    void GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet,
                                BsoFailSelect failCase);

    void Init(VkPhysicalDeviceFeatures *features = nullptr, VkPhysicalDeviceFeatures2 *features2 = nullptr,
              const VkCommandPoolCreateFlags flags = 0, void *instance_pnext = nullptr) {
        InitFramework(myDbgFunc, m_errorMonitor, instance_pnext);
        InitState(features, features2, flags);
    }

   protected:
    ErrorMonitor *m_errorMonitor;
    uint32_t m_instance_api_version = 0;
    uint32_t m_target_api_version = 0;

   public:
    ErrorMonitor *Monitor() { return m_errorMonitor; }
    VkCommandBufferObj *CommandBuffer() { return m_commandBuffer; }

   protected:
    bool m_enableWSI;

    virtual void SetUp() {
        m_instance_layer_names.clear();
        m_instance_extension_names.clear();
        m_device_extension_names.clear();

        // Add default instance extensions to the list
        m_instance_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);

        // Use Threading layer first to protect others from
        // ThreadCommandBufferCollision test
        m_instance_layer_names.push_back("VK_LAYER_GOOGLE_threading");
        m_instance_layer_names.push_back("VK_LAYER_LUNARG_parameter_validation");
        m_instance_layer_names.push_back("VK_LAYER_LUNARG_object_tracker");
        m_instance_layer_names.push_back("VK_LAYER_LUNARG_core_validation");
        m_instance_layer_names.push_back("VK_LAYER_GOOGLE_unique_objects");
        if (VkTestFramework::m_devsim_layer) {
            if (InstanceLayerSupported("VK_LAYER_LUNARG_device_simulation")) {
                m_instance_layer_names.push_back("VK_LAYER_LUNARG_device_simulation");
            } else {
                VkTestFramework::m_devsim_layer = false;
                printf("             Did not find VK_LAYER_LUNARG_device_simulation layer so it will not be enabled.\n");
            }
        }
        if (m_enableWSI) {
            m_instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
            m_device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
#ifdef NEED_TO_TEST_THIS_ON_PLATFORM
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
            m_instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
#endif  // VK_USE_PLATFORM_ANDROID_KHR
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
            m_instance_extension_names.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
#endif  // VK_USE_PLATFORM_WAYLAND_KHR
#if defined(VK_USE_PLATFORM_WIN32_KHR)
            m_instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#endif  // VK_USE_PLATFORM_WIN32_KHR
#endif  // NEED_TO_TEST_THIS_ON_PLATFORM
#if defined(VK_USE_PLATFORM_XCB_KHR)
            m_instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
#elif defined(VK_USE_PLATFORM_XLIB_KHR) m_instance_extension_names.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_XLIB_KHR } this->app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; this->app_info.pNext = NULL; this->app_info.pApplicationName = "layer_tests"; this->app_info.applicationVersion = 1; this->app_info.pEngineName = "unittest"; this->app_info.engineVersion = 1; this->app_info.apiVersion = VK_API_VERSION_1_0; m_errorMonitor = new ErrorMonitor; // Find out what version the instance supports and record the default target instance auto enumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"); if (enumerateInstanceVersion) { enumerateInstanceVersion(&m_instance_api_version); } else { m_instance_api_version = VK_API_VERSION_1_0; } m_target_api_version = app_info.apiVersion; } uint32_t SetTargetApiVersion(uint32_t target_api_version) { if (target_api_version == 0) target_api_version = VK_API_VERSION_1_0; if (target_api_version <= m_instance_api_version) { m_target_api_version = target_api_version; app_info.apiVersion = m_target_api_version; } return m_target_api_version; } uint32_t DeviceValidationVersion() { // The validation layers, assume the version we are validating to is the apiVersion unless the device apiVersion is lower VkPhysicalDeviceProperties props; GetPhysicalDeviceProperties(&props); return std::min(m_target_api_version, props.apiVersion); } bool LoadDeviceProfileLayer( PFN_vkSetPhysicalDeviceFormatPropertiesEXT &fpvkSetPhysicalDeviceFormatPropertiesEXT, PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT &fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT) { // Load required functions fpvkSetPhysicalDeviceFormatPropertiesEXT = (PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT"); fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = (PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr( instance(), "vkGetOriginalPhysicalDeviceFormatPropertiesEXT"); if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix); return 0; } return 1; } virtual void TearDown() { // Clean up resources before we reset ShutdownFramework(); delete m_errorMonitor; } VkLayerTest() { m_enableWSI = false; } }; void VkLayerTest::VKTriangleTest(BsoFailSelect failCase) { ASSERT_TRUE(m_device && m_device->initialized()); // VKTriangleTest assumes Init() has finished ASSERT_NO_FATAL_FAILURE(InitViewport()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); bool failcase_needs_depth = false; // to mark cases that need depth attachment VkBufferObj index_buffer; switch (failCase) { case BsoFailLineWidth: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_LINE_WIDTH); VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST; pipelineobj.SetInputAssembly(&ia_state); break; } case BsoFailDepthBias: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BIAS); VkPipelineRasterizationStateCreateInfo rs_state = {}; rs_state.sType = 
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state.depthBiasEnable = VK_TRUE; rs_state.lineWidth = 1.0f; pipelineobj.SetRasterization(&rs_state); break; } case BsoFailViewport: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT); break; } case BsoFailScissor: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR); break; } case BsoFailBlend: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_BLEND_CONSTANTS); VkPipelineColorBlendAttachmentState att_state = {}; att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state.blendEnable = VK_TRUE; pipelineobj.AddColorAttachment(0, att_state); break; } case BsoFailDepthBounds: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BOUNDS); break; } case BsoFailStencilReadMask: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK); break; } case BsoFailStencilWriteMask: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK); break; } case BsoFailStencilReference: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_REFERENCE); break; } case BsoFailIndexBuffer: break; case BsoFailIndexBufferBadSize: case BsoFailIndexBufferBadOffset: case BsoFailIndexBufferBadMapSize: case BsoFailIndexBufferBadMapOffset: { // Create an index buffer for these tests. // There is no need to populate it because we should bail before trying to draw. uint32_t const indices[] = {0}; VkBufferCreateInfo buffer_info = {}; buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_info.size = 1024; buffer_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; buffer_info.queueFamilyIndexCount = 1; buffer_info.pQueueFamilyIndices = indices; index_buffer.init(*m_device, buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); } break; case BsoFailCmdClearAttachments: break; case BsoFailNone: break; default: break; } VkDescriptorSetObj descriptorSet(m_device); VkImageView *depth_attachment = nullptr; if (failcase_needs_depth) { m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu()); ASSERT_TRUE(m_depth_stencil_fmt != VK_FORMAT_UNDEFINED); m_depthStencil->Init(m_device, static_cast(m_width), static_cast(m_height), m_depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); depth_attachment = m_depthStencil->BindInfo(); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget(1, depth_attachment)); m_commandBuffer->begin(); GenericDrawPreparation(m_commandBuffer, pipelineobj, descriptorSet, failCase); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // render triangle if (failCase == BsoFailIndexBuffer) { // Use DrawIndexed w/o an index buffer bound m_commandBuffer->DrawIndexed(3, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadSize) { // Bind the index buffer and draw one too many indices m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(513, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadOffset) { // Bind the index buffer and draw one past the end of the buffer using the offset m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(512, 1, 1, 0, 0); } else if (failCase == BsoFailIndexBufferBadMapSize) { // Bind the index buffer at the middle point and draw one too many indices m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(257, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadMapOffset) { // Bind 
the index buffer at the middle point and draw one past the end of the buffer m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(256, 1, 1, 0, 0); } else { m_commandBuffer->Draw(3, 1, 0, 0); } if (failCase == BsoFailCmdClearAttachments) { VkClearAttachment color_attachment = {}; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.colorAttachment = 2000000000; // Someone who knew what they were doing would use 0 for the index; VkClearRect clear_rect = {{{0, 0}, {static_cast(m_width), static_cast(m_height)}}, 0, 0}; vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); } // finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(true); DestroyRenderTarget(); } void VkLayerTest::GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet, BsoFailSelect failCase) { commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); // Make sure depthWriteEnable is set so that Depth fail test will work // correctly // Make sure stencilTestEnable is set so that Stencil fail test will work // correctly VkStencilOpState stencil = {}; stencil.failOp = VK_STENCIL_OP_KEEP; stencil.passOp = VK_STENCIL_OP_KEEP; stencil.depthFailOp = VK_STENCIL_OP_KEEP; stencil.compareOp = VK_COMPARE_OP_NEVER; VkPipelineDepthStencilStateCreateInfo ds_ci = {}; ds_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; ds_ci.pNext = NULL; ds_ci.depthTestEnable = VK_FALSE; ds_ci.depthWriteEnable = VK_TRUE; ds_ci.depthCompareOp = VK_COMPARE_OP_NEVER; ds_ci.depthBoundsTestEnable = VK_FALSE; if (failCase == BsoFailDepthBounds) { ds_ci.depthBoundsTestEnable = VK_TRUE; ds_ci.maxDepthBounds = 0.0f; ds_ci.minDepthBounds = 0.0f; } ds_ci.stencilTestEnable = VK_TRUE; ds_ci.front = stencil; ds_ci.back = stencil; pipelineobj.SetDepthStencil(&ds_ci); pipelineobj.SetViewport(m_viewports); pipelineobj.SetScissor(m_scissors); descriptorSet.CreateVKDescriptorSet(commandBuffer); VkResult err = pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); vkCmdBindPipeline(commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineobj.handle()); commandBuffer->BindDescriptorSet(descriptorSet); } class VkPositiveLayerTest : public VkLayerTest { public: protected: }; class VkWsiEnabledLayerTest : public VkLayerTest { public: protected: VkWsiEnabledLayerTest() { m_enableWSI = true; } }; class VkBufferTest { public: enum eTestEnFlags { eDoubleDelete, eInvalidDeviceOffset, eInvalidMemoryOffset, eBindNullBuffer, eBindFakeBuffer, eFreeInvalidHandle, eNone, }; enum eTestConditions { eOffsetAlignment = 1 }; static bool GetTestConditionValid(VkDeviceObj *aVulkanDevice, eTestEnFlags aTestFlag, VkBufferUsageFlags aBufferUsage = 0) { if (eInvalidDeviceOffset != aTestFlag && eInvalidMemoryOffset != aTestFlag) { return true; } VkDeviceSize offset_limit = 0; if (eInvalidMemoryOffset == aTestFlag) { VkBuffer vulkanBuffer; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 32; buffer_create_info.usage = aBufferUsage; vkCreateBuffer(aVulkanDevice->device(), &buffer_create_info, nullptr, &vulkanBuffer); VkMemoryRequirements memory_reqs = {}; 
vkGetBufferMemoryRequirements(aVulkanDevice->device(), vulkanBuffer, &memory_reqs); vkDestroyBuffer(aVulkanDevice->device(), vulkanBuffer, nullptr); offset_limit = memory_reqs.alignment; } else if ((VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minTexelBufferOffsetAlignment; } else if (VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minUniformBufferOffsetAlignment; } else if (VK_BUFFER_USAGE_STORAGE_BUFFER_BIT & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minStorageBufferOffsetAlignment; } return eOffsetAlignment < offset_limit; } // A constructor which performs validation tests within construction. VkBufferTest(VkDeviceObj *aVulkanDevice, VkBufferUsageFlags aBufferUsage, eTestEnFlags aTestFlag = eNone) : AllocateCurrent(true), BoundCurrent(false), CreateCurrent(false), InvalidDeleteEn(false), VulkanDevice(aVulkanDevice->device()) { if (eBindNullBuffer == aTestFlag || eBindFakeBuffer == aTestFlag) { VkMemoryAllocateInfo memory_allocate_info = {}; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.allocationSize = 1; // fake size -- shouldn't matter for the test memory_allocate_info.memoryTypeIndex = 0; // fake type -- shouldn't matter for the test vkAllocateMemory(VulkanDevice, &memory_allocate_info, nullptr, &VulkanMemory); VulkanBuffer = (aTestFlag == eBindNullBuffer) ? VK_NULL_HANDLE : (VkBuffer)0xCDCDCDCDCDCDCDCD; vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, 0); } else { VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 32; buffer_create_info.usage = aBufferUsage; vkCreateBuffer(VulkanDevice, &buffer_create_info, nullptr, &VulkanBuffer); CreateCurrent = true; VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(VulkanDevice, VulkanBuffer, &memory_requirements); VkMemoryAllocateInfo memory_allocate_info = {}; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.allocationSize = memory_requirements.size + eOffsetAlignment; bool pass = aVulkanDevice->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { CreateCurrent = false; vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); return; } vkAllocateMemory(VulkanDevice, &memory_allocate_info, NULL, &VulkanMemory); // NB: 1 is intentionally an invalid offset value const bool offset_en = eInvalidDeviceOffset == aTestFlag || eInvalidMemoryOffset == aTestFlag; vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, offset_en ? eOffsetAlignment : 0); BoundCurrent = true; InvalidDeleteEn = (eFreeInvalidHandle == aTestFlag); } } ~VkBufferTest() { if (CreateCurrent) { vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); } if (AllocateCurrent) { if (InvalidDeleteEn) { union { VkDeviceMemory device_memory; unsigned long long index_access; } bad_index; bad_index.device_memory = VulkanMemory; bad_index.index_access++; vkFreeMemory(VulkanDevice, bad_index.device_memory, nullptr); } vkFreeMemory(VulkanDevice, VulkanMemory, nullptr); } } bool GetBufferCurrent() { return AllocateCurrent && BoundCurrent && CreateCurrent; } const VkBuffer &GetBuffer() { return VulkanBuffer; } void TestDoubleDestroy() { // Destroy the buffer but leave the flag set, which will cause // the buffer to be destroyed again in the destructor. 
vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); } protected: bool AllocateCurrent; bool BoundCurrent; bool CreateCurrent; bool InvalidDeleteEn; VkBuffer VulkanBuffer; VkDevice VulkanDevice; VkDeviceMemory VulkanMemory; }; class VkVerticesObj { public: VkVerticesObj(VkDeviceObj *aVulkanDevice, unsigned aAttributeCount, unsigned aBindingCount, unsigned aByteStride, VkDeviceSize aVertexCount, const float *aVerticies) : BoundCurrent(false), AttributeCount(aAttributeCount), BindingCount(aBindingCount), BindId(BindIdGenerator), PipelineVertexInputStateCreateInfo(), VulkanMemoryBuffer(aVulkanDevice, static_cast(aByteStride * aVertexCount), reinterpret_cast(aVerticies), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) { BindIdGenerator++; // NB: This can wrap w/misuse VertexInputAttributeDescription = new VkVertexInputAttributeDescription[AttributeCount]; VertexInputBindingDescription = new VkVertexInputBindingDescription[BindingCount]; PipelineVertexInputStateCreateInfo.pVertexAttributeDescriptions = VertexInputAttributeDescription; PipelineVertexInputStateCreateInfo.vertexAttributeDescriptionCount = AttributeCount; PipelineVertexInputStateCreateInfo.pVertexBindingDescriptions = VertexInputBindingDescription; PipelineVertexInputStateCreateInfo.vertexBindingDescriptionCount = BindingCount; PipelineVertexInputStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; unsigned i = 0; do { VertexInputAttributeDescription[i].binding = BindId; VertexInputAttributeDescription[i].location = i; VertexInputAttributeDescription[i].format = VK_FORMAT_R32G32B32_SFLOAT; VertexInputAttributeDescription[i].offset = sizeof(float) * aByteStride; i++; } while (AttributeCount < i); i = 0; do { VertexInputBindingDescription[i].binding = BindId; VertexInputBindingDescription[i].stride = aByteStride; VertexInputBindingDescription[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; i++; } while (BindingCount < i); } ~VkVerticesObj() { if (VertexInputAttributeDescription) { delete[] VertexInputAttributeDescription; } if (VertexInputBindingDescription) { delete[] VertexInputBindingDescription; } } bool AddVertexInputToPipe(VkPipelineObj &aPipelineObj) { aPipelineObj.AddVertexInputAttribs(VertexInputAttributeDescription, AttributeCount); aPipelineObj.AddVertexInputBindings(VertexInputBindingDescription, BindingCount); return true; } void BindVertexBuffers(VkCommandBuffer aCommandBuffer, unsigned aOffsetCount = 0, VkDeviceSize *aOffsetList = nullptr) { VkDeviceSize *offsetList; unsigned offsetCount; if (aOffsetCount) { offsetList = aOffsetList; offsetCount = aOffsetCount; } else { offsetList = new VkDeviceSize[1](); offsetCount = 1; } vkCmdBindVertexBuffers(aCommandBuffer, BindId, offsetCount, &VulkanMemoryBuffer.handle(), offsetList); BoundCurrent = true; if (!aOffsetCount) { delete[] offsetList; } } protected: static uint32_t BindIdGenerator; bool BoundCurrent; unsigned AttributeCount; unsigned BindingCount; uint32_t BindId; VkPipelineVertexInputStateCreateInfo PipelineVertexInputStateCreateInfo; VkVertexInputAttributeDescription *VertexInputAttributeDescription; VkVertexInputBindingDescription *VertexInputBindingDescription; VkConstantBufferObj VulkanMemoryBuffer; }; uint32_t VkVerticesObj::BindIdGenerator; struct OneOffDescriptorSet { VkDeviceObj *device_; VkDescriptorPool pool_; VkDescriptorSetLayoutObj layout_; VkDescriptorSet set_; typedef std::vector Bindings; OneOffDescriptorSet(VkDeviceObj *device, const Bindings &bindings) : device_{device}, pool_{}, layout_(device, bindings), set_{} { VkResult err; 
std::vector sizes; for (const auto &b : bindings) sizes.push_back({b.descriptorType, std::max(1u, b.descriptorCount)}); VkDescriptorPoolCreateInfo dspci = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, 0, 1, uint32_t(sizes.size()), sizes.data()}; err = vkCreateDescriptorPool(device_->handle(), &dspci, nullptr, &pool_); if (err != VK_SUCCESS) return; VkDescriptorSetAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr, pool_, 1, &layout_.handle()}; err = vkAllocateDescriptorSets(device_->handle(), &alloc_info, &set_); } ~OneOffDescriptorSet() { // No need to destroy set-- it's going away with the pool. vkDestroyDescriptorPool(device_->handle(), pool_, nullptr); } bool Initialized() { return pool_ != VK_NULL_HANDLE && layout_.initialized() && set_ != VK_NULL_HANDLE; } }; template bool IsValidVkStruct(const T &s) { return LvlTypeMap::kSType == s.sType; } // Helper class for tersely creating create pipeline tests // // Designed with minimal error checking to ensure easy error state creation // See OneshotTest for typical usage struct CreatePipelineHelper { public: std::vector dsl_bindings_; std::unique_ptr descriptor_set_; std::vector shader_stages_; VkPipelineVertexInputStateCreateInfo vi_ci_ = {}; VkPipelineInputAssemblyStateCreateInfo ia_ci_ = {}; VkPipelineTessellationStateCreateInfo tess_ci_ = {}; VkViewport viewport_ = {}; VkRect2D scissor_ = {}; VkPipelineViewportStateCreateInfo vp_state_ci_ = {}; VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci_ = {}; VkPipelineLayoutCreateInfo pipeline_layout_ci_ = {}; VkPipelineLayoutObj pipeline_layout_; VkPipelineDynamicStateCreateInfo dyn_state_ci_ = {}; VkPipelineRasterizationStateCreateInfo rs_state_ci_ = {}; VkPipelineColorBlendAttachmentState cb_attachments_ = {}; VkPipelineColorBlendStateCreateInfo cb_ci_ = {}; VkGraphicsPipelineCreateInfo gp_ci_ = {}; VkPipelineCacheCreateInfo pc_ci_ = {}; VkPipeline pipeline_ = VK_NULL_HANDLE; VkPipelineCache pipeline_cache_ = VK_NULL_HANDLE; std::unique_ptr vs_; std::unique_ptr fs_; VkLayerTest &layer_test_; CreatePipelineHelper(VkLayerTest &test) : layer_test_(test) {} ~CreatePipelineHelper() { VkDevice device = layer_test_.device(); vkDestroyPipelineCache(device, pipeline_cache_, nullptr); vkDestroyPipeline(device, pipeline_, nullptr); } void InitDescriptorSetInfo() { dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; } void InitInputAndVertexInfo() { vi_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; ia_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; } void InitMultisampleInfo() { pipe_ms_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci_.pNext = nullptr; pipe_ms_state_ci_.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci_.sampleShadingEnable = VK_FALSE; pipe_ms_state_ci_.minSampleShading = 1.0; pipe_ms_state_ci_.pSampleMask = NULL; } void InitPipelineLayoutInfo() { pipeline_layout_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci_.setLayoutCount = 1; // Not really changeable because InitState() sets exactly one pSetLayout pipeline_layout_ci_.pSetLayouts = nullptr; // must bound after it is created } void InitViewportInfo() { viewport_ = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; scissor_ = {{0, 0}, {64, 64}}; vp_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp_state_ci_.pNext = nullptr; 
vp_state_ci_.viewportCount = 1; vp_state_ci_.pViewports = &viewport_; // ignored if dynamic vp_state_ci_.scissorCount = 1; vp_state_ci_.pScissors = &scissor_; // ignored if dynamic } void InitDynamicStateInfo() { // Use a "validity" check on the {} initialized structure to detect initialization // during late bind } void InitShaderInfo() { vs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, &layer_test_)); fs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, &layer_test_)); // We shouldn't need a fragment shader but add it to be able to run on more devices shader_stages_ = {vs_->GetStageCreateInfo(), fs_->GetStageCreateInfo()}; } void InitRasterizationInfo() { rs_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci_.pNext = nullptr; rs_state_ci_.flags = 0; rs_state_ci_.depthClampEnable = VK_FALSE; rs_state_ci_.rasterizerDiscardEnable = VK_FALSE; rs_state_ci_.polygonMode = VK_POLYGON_MODE_FILL; rs_state_ci_.cullMode = VK_CULL_MODE_BACK_BIT; rs_state_ci_.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rs_state_ci_.depthBiasEnable = VK_FALSE; rs_state_ci_.lineWidth = 1.0F; } void InitBlendStateInfo() { cb_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb_ci_.logicOpEnable = VK_FALSE; cb_ci_.logicOp = VK_LOGIC_OP_COPY; // ignored if enable is VK_FALSE above cb_ci_.attachmentCount = layer_test_.RenderPassInfo().subpassCount; ASSERT_TRUE(IsValidVkStruct(layer_test_.RenderPassInfo())); cb_ci_.pAttachments = &cb_attachments_; for (int i = 0; i < 4; i++) { cb_ci_.blendConstants[0] = 1.0F; } } void InitGraphicsPipelineInfo() { // Color-only rendering in a subpass with no depth/stencil attachment // Active Pipeline Shader Stages // Vertex Shader // Fragment Shader // Required: Fixed-Function Pipeline Stages // VkPipelineVertexInputStateCreateInfo // VkPipelineInputAssemblyStateCreateInfo // VkPipelineViewportStateCreateInfo // VkPipelineRasterizationStateCreateInfo // VkPipelineMultisampleStateCreateInfo // VkPipelineColorBlendStateCreateInfo gp_ci_.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci_.pNext = nullptr; gp_ci_.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci_.pVertexInputState = &vi_ci_; gp_ci_.pInputAssemblyState = &ia_ci_; gp_ci_.pTessellationState = nullptr; gp_ci_.pViewportState = &vp_state_ci_; gp_ci_.pRasterizationState = &rs_state_ci_; gp_ci_.pMultisampleState = &pipe_ms_state_ci_; gp_ci_.pDepthStencilState = nullptr; gp_ci_.pColorBlendState = &cb_ci_; gp_ci_.pDynamicState = nullptr; gp_ci_.renderPass = layer_test_.renderPass(); } void InitPipelineCacheInfo() { pc_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; pc_ci_.pNext = nullptr; pc_ci_.flags = 0; pc_ci_.initialDataSize = 0; pc_ci_.pInitialData = nullptr; } // Not called by default during init_info void InitTesselationState() { // TBD -- add shaders and create_info } // TDB -- add control for optional and/or additional initialization void InitInfo() { InitDescriptorSetInfo(); InitInputAndVertexInfo(); InitMultisampleInfo(); InitPipelineLayoutInfo(); InitViewportInfo(); InitDynamicStateInfo(); InitShaderInfo(); InitRasterizationInfo(); InitBlendStateInfo(); InitGraphicsPipelineInfo(); InitPipelineCacheInfo(); } void InitState() { VkResult err; descriptor_set_.reset(new OneOffDescriptorSet(layer_test_.DeviceObj(), dsl_bindings_)); ASSERT_TRUE(descriptor_set_->Initialized()); const std::vector push_ranges( 
pipeline_layout_ci_.pPushConstantRanges, pipeline_layout_ci_.pPushConstantRanges + pipeline_layout_ci_.pushConstantRangeCount); pipeline_layout_ = VkPipelineLayoutObj(layer_test_.DeviceObj(), {&descriptor_set_->layout_}, push_ranges); err = vkCreatePipelineCache(layer_test_.device(), &pc_ci_, NULL, &pipeline_cache_); ASSERT_VK_SUCCESS(err); } void LateBindPipelineInfo() { // By value or dynamically located items must be late bound gp_ci_.layout = pipeline_layout_.handle(); gp_ci_.stageCount = shader_stages_.size(); gp_ci_.pStages = shader_stages_.data(); if ((gp_ci_.pTessellationState == nullptr) && IsValidVkStruct(tess_ci_)) { gp_ci_.pTessellationState = &tess_ci_; } if ((gp_ci_.pDynamicState == nullptr) && IsValidVkStruct(dyn_state_ci_)) { gp_ci_.pDynamicState = &dyn_state_ci_; } } VkResult CreateGraphicsPipeline(bool implicit_destroy = true, bool do_late_bind = true) { VkResult err; if (do_late_bind) { LateBindPipelineInfo(); } if (implicit_destroy && (pipeline_ != VK_NULL_HANDLE)) { vkDestroyPipeline(layer_test_.device(), pipeline_, nullptr); pipeline_ = VK_NULL_HANDLE; } err = vkCreateGraphicsPipelines(layer_test_.device(), pipeline_cache_, 1, &gp_ci_, NULL, &pipeline_); return err; } // Helper function to create a simple test case (positive or negative) // // info_override can be any callable that takes a CreatePipelineHeper & // flags, error can be any args accepted by "SetDesiredFailure". template static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, const std::vector &errors, bool positive_test = false) { CreatePipelineHelper helper(test); helper.InitInfo(); info_override(helper); helper.InitState(); for (const auto &error : errors) test.Monitor()->SetDesiredFailureMsg(flags, error); helper.CreateGraphicsPipeline(); if (positive_test) { test.Monitor()->VerifyNotFound(); } else { test.Monitor()->VerifyFound(); } } template static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, Error error, bool positive_test = false) { OneshotTest(test, info_override, flags, std::vector(1, error), positive_test); } }; namespace chain_util { template T Init(const void *pnext_in = nullptr) { T pnext_obj = {}; pnext_obj.sType = LvlTypeMap::kSType; pnext_obj.pNext = pnext_in; return pnext_obj; } class ExtensionChain { const void *head_ = nullptr; typedef std::function AddIfFunction; AddIfFunction add_if_; typedef std::vector List; List *list_; public: template ExtensionChain(F &add_if, List *list) : add_if_(add_if), list_(list) {} template void Add(const char *name, T &obj) { if (add_if_(name)) { if (list_) { list_->push_back(name); } obj.pNext = head_; head_ = &obj; } } const void *Head() const { return head_; } }; } // namespace chain_util // PushDescriptorProperties helper VkPhysicalDevicePushDescriptorPropertiesKHR GetPushDescriptorProperties(VkInstance instance, VkPhysicalDevice gpu) { // Find address of extension call and make the call -- assumes needed extensions are enabled. 
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties2KHR"); assert(vkGetPhysicalDeviceProperties2KHR != nullptr); // Get the push descriptor limits auto push_descriptor_prop = lvl_init_struct(); auto prop2 = lvl_init_struct(&push_descriptor_prop); vkGetPhysicalDeviceProperties2KHR(gpu, &prop2); return push_descriptor_prop; } // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** TEST_F(VkLayerTest, RequiredParameter) { TEST_DESCRIPTION("Specify VK_NULL_HANDLE, NULL, and 0 for required handle, pointer, array, and array count parameters"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pFeatures specified as NULL"); // Specify NULL for a pointer to a handle // Expected to trigger an error with // parameter_validation::validate_required_pointer vkGetPhysicalDeviceFeatures(gpu(), NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pQueueFamilyPropertyCount specified as NULL"); // Specify NULL for pointer to array count // Expected to trigger an error with parameter_validation::validate_array vkGetPhysicalDeviceQueueFamilyProperties(gpu(), NULL, NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); // Specify 0 for a required array count // Expected to trigger an error with parameter_validation::validate_array VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_commandBuffer->SetViewport(0, 0, &viewport); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCreateImage-pCreateInfo-parameter"); // Specify a null pImageCreateInfo struct pointer VkImage test_image; vkCreateImage(device(), NULL, NULL, &test_image); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter"); // Specify NULL for a required array // Expected to trigger an error with parameter_validation::validate_array m_commandBuffer->SetViewport(0, 1, NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter memory specified as VK_NULL_HANDLE"); // Specify VK_NULL_HANDLE for a required handle // Expected to trigger an error with // parameter_validation::validate_required_handle vkUnmapMemory(device(), VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pFences[0] specified as VK_NULL_HANDLE"); // Specify VK_NULL_HANDLE for a required handle array entry // Expected to trigger an error with // parameter_validation::validate_required_handle_array VkFence fence = VK_NULL_HANDLE; vkResetFences(device(), 1, &fence); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pAllocateInfo specified 
as NULL"); // Specify NULL for a required struct pointer // Expected to trigger an error with // parameter_validation::validate_struct_type VkDeviceMemory memory = VK_NULL_HANDLE; vkAllocateMemory(device(), NULL, NULL, &memory); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of faceMask must not be 0"); // Specify 0 for a required VkFlags parameter // Expected to trigger an error with parameter_validation::validate_flags m_commandBuffer->SetStencilReference(0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of pSubmits[0].pWaitDstStageMask[0] must not be 0"); // Specify 0 for a required VkFlags array entry // Expected to trigger an error with // parameter_validation::validate_flags_array VkSemaphore semaphore = VK_NULL_HANDLE; VkPipelineStageFlags stageFlags = 0; VkSubmitInfo submitInfo = {}; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.waitSemaphoreCount = 1; submitInfo.pWaitSemaphores = &semaphore; submitInfo.pWaitDstStageMask = &stageFlags; vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubmitInfo-sType-sType"); stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // Set a bogus sType and see what happens submitInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; submitInfo.waitSemaphoreCount = 1; submitInfo.pWaitSemaphores = &semaphore; submitInfo.pWaitDstStageMask = &stageFlags; vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubmitInfo-pWaitSemaphores-parameter"); stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.waitSemaphoreCount = 1; // Set a null pointer for pWaitSemaphores submitInfo.pWaitSemaphores = NULL; submitInfo.pWaitDstStageMask = &stageFlags; vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, PnextOnlyStructValidation) { TEST_DESCRIPTION("See if checks occur on structs ONLY used in pnext chains."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array required_device_extensions = { {VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device passing in a bad PdevFeatures2 value auto indexing_features = lvl_init_struct(); auto features2 = lvl_init_struct(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); // Set one of the features values to 
an invalid boolean value indexing_features.descriptorBindingUniformBufferUpdateAfterBind = 800; uint32_t queue_node_count; vkGetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, NULL); VkQueueFamilyProperties *queue_props = new VkQueueFamilyProperties[queue_node_count]; vkGetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, queue_props); float priorities[] = {1.0f}; VkDeviceQueueCreateInfo queue_info{}; queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info.pNext = NULL; queue_info.flags = 0; queue_info.queueFamilyIndex = 0; queue_info.queueCount = 1; queue_info.pQueuePriorities = &priorities[0]; VkDeviceCreateInfo dev_info = {}; dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; dev_info.pNext = NULL; dev_info.queueCreateInfoCount = 1; dev_info.pQueueCreateInfos = &queue_info; dev_info.enabledLayerCount = 0; dev_info.ppEnabledLayerNames = NULL; dev_info.enabledExtensionCount = m_device_extension_names.size(); dev_info.ppEnabledExtensionNames = m_device_extension_names.data(); dev_info.pNext = &features2; VkDevice dev; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is neither VK_TRUE nor VK_FALSE"); m_errorMonitor->SetUnexpectedError("Failed to create"); vkCreateDevice(gpu(), &dev_info, NULL, &dev); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ReservedParameter) { TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " must be 0"); // Specify 0 for a reserved VkFlags parameter // Expected to trigger an error with // parameter_validation::validate_reserved_flags VkEvent event_handle = VK_NULL_HANDLE; VkEventCreateInfo event_info = {}; event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; event_info.flags = 1; vkCreateEvent(device(), &event_info, NULL, &event_handle); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DebugMarkerNameTest) { ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME); } else { printf("%s Debug Marker Extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkDebugMarkerSetObjectNameEXT fpvkDebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)vkGetInstanceProcAddr(instance(), "vkDebugMarkerSetObjectNameEXT"); if (!(fpvkDebugMarkerSetObjectNameEXT)) { printf("%s Can't find fpvkDebugMarkerSetObjectNameEXT; skipped.\n", kSkipPrefix); return; } VkEvent event_handle = VK_NULL_HANDLE; VkEventCreateInfo event_info = {}; event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(device(), &event_info, NULL, &event_handle); VkDebugMarkerObjectNameInfoEXT name_info = {}; name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT; name_info.pNext = nullptr; name_info.object = (uint64_t)event_handle; name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT; name_info.pObjectName = "UnimaginablyImprobableString"; fpvkDebugMarkerSetObjectNameEXT(device(), &name_info); m_commandBuffer->begin(); vkCmdSetEvent(m_commandBuffer->handle(), event_handle, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); 
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UnimaginablyImprobableString"); vkDestroyEvent(m_device->device(), event_handle, NULL); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); VkBuffer buffer; VkDeviceMemory memory_1, memory_2; std::string memory_name = "memory_name"; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffer_create_info.size = 1; vkCreateBuffer(device(), &buffer_create_info, nullptr, &buffer); VkMemoryRequirements memRequirements; vkGetBufferMemoryRequirements(device(), buffer, &memRequirements); VkMemoryAllocateInfo memory_allocate_info = {}; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.allocationSize = memRequirements.size; memory_allocate_info.memoryTypeIndex = 0; vkAllocateMemory(device(), &memory_allocate_info, nullptr, &memory_1); vkAllocateMemory(device(), &memory_allocate_info, nullptr, &memory_2); name_info.object = (uint64_t)memory_2; name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT; name_info.pObjectName = memory_name.c_str(); fpvkDebugMarkerSetObjectNameEXT(device(), &name_info); vkBindBufferMemory(device(), buffer, memory_1, 0); // Test core_validation layer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, memory_name); vkBindBufferMemory(device(), buffer, memory_2, 0); m_errorMonitor->VerifyFound(); vkFreeMemory(device(), memory_1, nullptr); memory_1 = VK_NULL_HANDLE; vkFreeMemory(device(), memory_2, nullptr); memory_2 = VK_NULL_HANDLE; vkDestroyBuffer(device(), buffer, nullptr); buffer = VK_NULL_HANDLE; VkCommandBuffer commandBuffer; std::string commandBuffer_name = "command_buffer_name"; VkCommandPool commandpool_1; VkCommandPool commandpool_2; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_1); vkCreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_2); VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = commandpool_1; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(device(), &command_buffer_allocate_info, &commandBuffer); name_info.object = (uint64_t)commandBuffer; name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT; name_info.pObjectName = commandBuffer_name.c_str(); fpvkDebugMarkerSetObjectNameEXT(device(), &name_info); VkCommandBufferBeginInfo cb_begin_Info = {}; cb_begin_Info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cb_begin_Info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; vkBeginCommandBuffer(commandBuffer, &cb_begin_Info); const VkRect2D scissor = {{-1, 0}, {16, 16}}; const VkRect2D scissors[] = {scissor, scissor}; // Test parameter_validation layer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, commandBuffer_name); vkCmdSetScissor(commandBuffer, 1, 1, scissors); m_errorMonitor->VerifyFound(); // Test object_tracker layer 
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, commandBuffer_name);
    vkFreeCommandBuffers(device(), commandpool_2, 1, &commandBuffer);
    m_errorMonitor->VerifyFound();

    vkDestroyCommandPool(device(), commandpool_1, NULL);
    vkDestroyCommandPool(device(), commandpool_2, NULL);
}

TEST_F(VkLayerTest, DebugUtilsNameTest) {
    // Check for the Debug Utils instance extension
    if (InstanceExtensionSupported(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
    } else {
        printf("%s Debug Utils Extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState());

    PFN_vkSetDebugUtilsObjectNameEXT fpvkSetDebugUtilsObjectNameEXT =
        (PFN_vkSetDebugUtilsObjectNameEXT)vkGetDeviceProcAddr(m_device->device(), "vkSetDebugUtilsObjectNameEXT");
    if (!(fpvkSetDebugUtilsObjectNameEXT)) {
        printf("%s Can't find fpvkSetDebugUtilsObjectNameEXT; skipped.\n", kSkipPrefix);
        return;
    }

    VkEvent event_handle = VK_NULL_HANDLE;
    VkEventCreateInfo event_info = {};
    event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(device(), &event_info, NULL, &event_handle);

    VkDebugUtilsObjectNameInfoEXT name_info = {};
    name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
    name_info.pNext = nullptr;
    name_info.objectHandle = (uint64_t)event_handle;
    name_info.objectType = VK_OBJECT_TYPE_EVENT;
    name_info.pObjectName = "Popbutton_T_Bumfuzzle";
    fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);

    m_commandBuffer->begin();
    vkCmdSetEvent(m_commandBuffer->handle(), event_handle, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
    m_commandBuffer->end();

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    // Provoke an error from the core_validation layer
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Popbutton_T_Bumfuzzle");
    vkDestroyEvent(m_device->device(), event_handle, NULL);
    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);

    // Provoke an error from the object tracker layer
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Popbutton_T_Bumfuzzle");
    vkDestroyEvent(m_device->device(), event_handle, NULL);
    m_errorMonitor->VerifyFound();

    // Change label for a given object, then provoke an error from core_validation and look for the new name
    name_info.pObjectName = "A_Totally_Different_Name";
    fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "A_Totally_Different_Name");
    vkDestroyEvent(m_device->device(), event_handle, NULL);
    m_errorMonitor->VerifyFound();

    vkQueueWaitIdle(m_device->m_queue);
}

TEST_F(VkLayerTest, InvalidStructSType) {
    TEST_DESCRIPTION("Specify an invalid VkStructureType for a Vulkan structure's sType field");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pAllocateInfo->sType must be");
    // Zero struct memory, effectively setting sType to
    // VK_STRUCTURE_TYPE_APPLICATION_INFO
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type
    VkMemoryAllocateInfo alloc_info = {};
    VkDeviceMemory memory = VK_NULL_HANDLE;
    vkAllocateMemory(device(), &alloc_info, NULL, &memory);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pSubmits[0].sType must be");
    // Zero struct memory, effectively setting sType to
    // VK_STRUCTURE_TYPE_APPLICATION_INFO
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type_array
    VkSubmitInfo submit_info = {};
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidStructPNext) {
    TEST_DESCRIPTION("Specify an invalid value for a Vulkan structure's pNext field");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "value of pCreateInfo->pNext must be NULL");
    // Set VkMemoryAllocateInfo::pNext to a non-NULL value, when pNext must be NULL.
    // Need to pick a function that has no allowed pNext structure types.
    // Expected to trigger an error with parameter_validation::validate_struct_pnext
    VkEvent event = VK_NULL_HANDLE;
    VkEventCreateInfo event_alloc_info = {};
    // Zero-initialization will provide the correct sType
    VkApplicationInfo app_info = {};
    event_alloc_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    event_alloc_info.pNext = &app_info;
    vkCreateEvent(device(), &event_alloc_info, NULL, &event);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                         " chain includes a structure with unexpected VkStructureType ");
    // Set VkMemoryAllocateInfo::pNext to a non-NULL value, but use
    // a function that has allowed pNext structure types and specify
    // a structure type that is not allowed.
    // Expected to trigger an error with parameter_validation::validate_struct_pnext
    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkMemoryAllocateInfo memory_alloc_info = {};
    memory_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_alloc_info.pNext = &app_info;
    vkAllocateMemory(device(), &memory_alloc_info, NULL, &memory);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueOutOfRange) {
    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "does not fall within the begin..end range of the core VkFormat enumeration tokens");
    // Specify an invalid VkFormat value
    // Expected to trigger an error with
    // parameter_validation::validate_ranged_enum
    VkFormatProperties format_properties;
    vkGetPhysicalDeviceFormatProperties(gpu(), static_cast<VkFormat>(8000), &format_properties);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueBadMask) {
    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
    // Specify an invalid VkFlags bitmask value
    // Expected to trigger an error with parameter_validation::validate_flags
    VkImageFormatProperties image_format_properties;
    vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                                             static_cast<VkImageUsageFlags>(1 << 25), 0, &image_format_properties);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueBadFlag) {
    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
    // Specify an invalid VkFlags array entry
    // Expected to trigger an error with parameter_validation::validate_flags_array
    VkSemaphore semaphore;
    VkSemaphoreCreateInfo semaphore_create_info{};
    semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
    // `stage_flags` is set to a value which, currently, is not a defined stage flag
    // `VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM` works well for this
    VkPipelineStageFlags stage_flags = VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM;
    // `waitSemaphoreCount` *must* be greater than 0 to perform this check
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.waitSemaphoreCount = 1;
    submit_info.pWaitSemaphores = &semaphore;
    submit_info.pWaitDstStageMask = &stage_flags;
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vkDestroySemaphore(m_device->device(), semaphore, nullptr);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueBadBool) {
    // Make sure using VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE doesn't trigger a false positive.
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
    } else {
        printf("%s VK_KHR_sampler_mirror_clamp_to_edge extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is neither VK_TRUE nor VK_FALSE");
    // Specify an invalid VkBool32 value, expecting a warning with parameter_validation::validate_bool32
    VkSampler sampler = VK_NULL_HANDLE;
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;

    // Not VK_TRUE or VK_FALSE
    sampler_info.anisotropyEnable = 3;
    vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, MirrorClampToEdgeNotEnabled) {
    TEST_DESCRIPTION(
        "Validation should catch using MIRROR_CLAMP_TO_EDGE addressing mode if the "
        "VK_KHR_sampler_mirror_clamp_to_edge extension is not enabled.");
    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-addressModeU-01079");
    VkSampler sampler = VK_NULL_HANDLE;
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    // Set the modes to cause the error
    sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, AnisotropyFeatureDisabled) {
    TEST_DESCRIPTION("Validation should check anisotropy parameters are correct with samplerAnisotropy disabled.");

    // Determine if required device features are available
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    device_features.samplerAnisotropy = VK_FALSE;  // force anisotropy off
    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-anisotropyEnable-01070");
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    // With the samplerAnisotropy feature disabled, the sampler must not enable anisotropic filtering.
sampler_info.anisotropyEnable = VK_TRUE; VkSampler sampler = VK_NULL_HANDLE; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == err) { vkDestroySampler(m_device->device(), sampler, NULL); } sampler = VK_NULL_HANDLE; } TEST_F(VkLayerTest, AnisotropyFeatureEnabled) { TEST_DESCRIPTION("Validation must check several conditions that apply only when Anisotropy is enabled."); // Determine if required device features are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // These tests require that the device support anisotropic filtering if (VK_TRUE != device_features.samplerAnisotropy) { printf("%s Test requires unsupported samplerAnisotropy feature. Skipped.\n", kSkipPrefix); return; } bool cubic_support = false; if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) { m_device_extension_names.push_back("VK_IMG_filter_cubic"); cubic_support = true; } VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo(); sampler_info_ref.anisotropyEnable = VK_TRUE; VkSamplerCreateInfo sampler_info = sampler_info_ref; ASSERT_NO_FATAL_FAILURE(InitState()); auto do_test = [this](std::string code, const VkSamplerCreateInfo *pCreateInfo) -> void { VkResult err; VkSampler sampler = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code); err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == err) { vkDestroySampler(m_device->device(), sampler, NULL); } }; // maxAnisotropy out-of-bounds low. sampler_info.maxAnisotropy = NearestSmaller(1.0F); do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info); sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy; // maxAnisotropy out-of-bounds high. sampler_info.maxAnisotropy = NearestGreater(m_device->phy().properties().limits.maxSamplerAnisotropy); do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info); sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy; // Both anisotropy and unnormalized coords enabled sampler_info.unnormalizedCoordinates = VK_TRUE; // If unnormalizedCoordinates is VK_TRUE, minLod and maxLod must be zero sampler_info.minLod = 0; sampler_info.maxLod = 0; do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076", &sampler_info); sampler_info.unnormalizedCoordinates = sampler_info_ref.unnormalizedCoordinates; // Both anisotropy and cubic filtering enabled if (cubic_support) { sampler_info.minFilter = VK_FILTER_CUBIC_IMG; do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info); sampler_info.minFilter = sampler_info_ref.minFilter; sampler_info.magFilter = VK_FILTER_CUBIC_IMG; do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info); sampler_info.magFilter = sampler_info_ref.magFilter; } else { printf("%s Test requires unsupported extension \"VK_IMG_filter_cubic\". 
Skipped.\n", kSkipPrefix);
    }
}

TEST_F(VkLayerTest, UnnormalizedCoordinatesEnabled) {
    TEST_DESCRIPTION("Validate restrictions on sampler parameters when unnormalizedCoordinates is true.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo();
    sampler_info_ref.unnormalizedCoordinates = VK_TRUE;
    sampler_info_ref.minLod = 0.0f;
    sampler_info_ref.maxLod = 0.0f;
    VkSamplerCreateInfo sampler_info = sampler_info_ref;
    ASSERT_NO_FATAL_FAILURE(InitState());

    auto do_test = [this](std::string code, const VkSamplerCreateInfo *pCreateInfo) -> void {
        VkResult err;
        VkSampler sampler = VK_NULL_HANDLE;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code);
        err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler);
        m_errorMonitor->VerifyFound();
        if (VK_SUCCESS == err) {
            vkDestroySampler(m_device->device(), sampler, NULL);
        }
    };

    // min and mag filters must be the same
    sampler_info.minFilter = VK_FILTER_NEAREST;
    sampler_info.magFilter = VK_FILTER_LINEAR;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072", &sampler_info);
    std::swap(sampler_info.minFilter, sampler_info.magFilter);
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072", &sampler_info);
    sampler_info = sampler_info_ref;

    // mipmapMode must be NEAREST
    sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01073", &sampler_info);
    sampler_info = sampler_info_ref;

    // minLod and maxLod must be zero
    sampler_info.maxLod = 3.14159f;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074", &sampler_info);
    sampler_info.minLod = 2.71828f;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074", &sampler_info);
    sampler_info = sampler_info_ref;

    // addressModeU and addressModeV must both be CLAMP_TO_EDGE or CLAMP_TO_BORDER
    // checks all 12 invalid combinations out of 16 total combinations
    const std::array<VkSamplerAddressMode, 4> kAddressModes = {{
        VK_SAMPLER_ADDRESS_MODE_REPEAT,
        VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
        VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
        VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
    }};
    for (const auto umode : kAddressModes) {
        for (const auto vmode : kAddressModes) {
            if ((umode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE && umode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) ||
                (vmode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE && vmode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) {
                sampler_info.addressModeU = umode;
                sampler_info.addressModeV = vmode;
                do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01075", &sampler_info);
            }
        }
    }
    sampler_info = sampler_info_ref;

    // VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076 is tested in AnisotropyFeatureEnabled above
    // Since it requires checking/enabling the anisotropic filtering feature, it's easier to do it
    // with the other anisotropic tests.
    // compareEnable must be VK_FALSE
    sampler_info.compareEnable = VK_TRUE;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01077", &sampler_info);
    sampler_info = sampler_info_ref;
}

TEST_F(VkLayerTest, UnrecognizedValueMaxEnum) {
    ASSERT_NO_FATAL_FAILURE(Init());

    // Specify MAX_ENUM
    VkFormatProperties format_properties;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not fall within the begin..end range");
    vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_MAX_ENUM, &format_properties);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UpdateBufferAlignment) {
    TEST_DESCRIPTION("Check alignment parameters for vkCmdUpdateBuffer");
    uint32_t updateData[] = {1, 2, 3, 4, 5, 6, 7, 8};

    ASSERT_NO_FATAL_FAILURE(Init());

    VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    VkBufferObj buffer;
    buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs);

    m_commandBuffer->begin();
    // Introduce failure by using dstOffset that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 1, 4, updateData);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using dataSize that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, 6, updateData);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using dataSize that is < 0
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "must be greater than zero and less than or equal to 65536");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)-44, updateData);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using dataSize that is > 65536
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "must be greater than zero and less than or equal to 65536");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)80000, updateData);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, FillBufferAlignment) {
    TEST_DESCRIPTION("Check alignment parameters for vkCmdFillBuffer");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    VkBufferObj buffer;
    buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs);

    m_commandBuffer->begin();

    // Introduce failure by using dstOffset that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->FillBuffer(buffer.handle(), 1, 4, 0x11111111);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using size that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->FillBuffer(buffer.handle(), 0, 6, 0x11111111);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using size that is zero
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero");
    m_commandBuffer->FillBuffer(buffer.handle(), 0, 0, 0x11111111);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, PSOPolygonModeInvalid) {
    TEST_DESCRIPTION("Attempt to use a non-solid polygon fill mode in a pipeline when this feature is not enabled.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    std::vector<const char *> device_extension_names;
    auto features = m_device->phy().features();
    // Artificially disable support for non-solid fill modes
    features.fillModeNonSolid =
VK_FALSE; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkRenderpassObj render_pass(&test_device); const VkPipelineLayoutObj pipeline_layout(&test_device); VkPipelineRasterizationStateCreateInfo rs_ci = {}; rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_ci.pNext = nullptr; rs_ci.lineWidth = 1.0f; rs_ci.rasterizerDiscardEnable = VK_TRUE; VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set polygonMode to unsupported value POINT, should fail m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE"); { VkPipelineObj pipe(&test_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); // Introduce failure by setting unsupported polygon mode rs_ci.polygonMode = VK_POLYGON_MODE_POINT; pipe.SetRasterization(&rs_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); } m_errorMonitor->VerifyFound(); // Try again with polygonMode=LINE, should fail m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE"); { VkPipelineObj pipe(&test_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); // Introduce failure by setting unsupported polygon mode rs_ci.polygonMode = VK_POLYGON_MODE_LINE; pipe.SetRasterization(&rs_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); } m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SparseBindingImageBufferCreate) { TEST_DESCRIPTION("Create buffer/image with sparse attributes but without the sparse_binding bit set"); ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; buf_info.size = 2048; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; if (m_device->phy().features().sparseResidencyBuffer) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918"); buf_info.flags = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT; vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyBuffer feature. Skipped.\n", kSkipPrefix); return; } if (m_device->phy().features().sparseResidencyAliased) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918"); buf_info.flags = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT; vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyAliased feature. 
Skipped.\n", kSkipPrefix); return; } VkImage image; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 512; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; if (m_device->phy().features().sparseResidencyImage2D) { image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987"); vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyImage2D feature. Skipped.\n", kSkipPrefix); return; } if (m_device->phy().features().sparseResidencyAliased) { image_create_info.flags = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987"); vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); } else { printf("%s Test requires unsupported sparseResidencyAliased feature. Skipped.\n", kSkipPrefix); return; } } TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedTypes) { TEST_DESCRIPTION("Create images with sparse residency with unsupported types"); // Determine which device feature are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // Mask out device features we don't want and initialize device state device_features.sparseResidencyImage2D = VK_FALSE; device_features.sparseResidencyImage3D = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); if (!m_device->phy().features().sparseBinding) { printf("%s Test requires unsupported sparseBinding feature. 
Skipped.\n", kSkipPrefix); return; } VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_1D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 512; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // 1D image w/ sparse residency is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00970"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // 2D image w/ sparse residency when feature isn't available image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.extent.height = 64; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00971"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // 3D image w/ sparse residency when feature isn't available image_create_info.imageType = VK_IMAGE_TYPE_3D; image_create_info.extent.depth = 8; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00972"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedSamples) { TEST_DESCRIPTION("Create images with sparse residency with unsupported tiling or sample counts"); // Determine which device feature are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // These tests require that the device support sparse residency for 2D images if (VK_TRUE != device_features.sparseResidencyImage2D) { printf("%s Test requires unsupported SparseResidencyImage2D feature. 
Skipped.\n", kSkipPrefix); return; } // Mask out device features we don't want and initialize device state device_features.sparseResidency2Samples = VK_FALSE; device_features.sparseResidency4Samples = VK_FALSE; device_features.sparseResidency8Samples = VK_FALSE; device_features.sparseResidency16Samples = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // 2D image w/ sparse residency and linear tiling is an error m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then image tiling of VK_IMAGE_TILING_LINEAR is not supported"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Multi-sample image w/ sparse residency when feature isn't available (4 flavors) image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00973"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00974"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_8_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00975"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_16_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00976"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, GpuValidationArrayOOB) { TEST_DESCRIPTION("GPU validation: Verify detection of out-of-bounds descriptor array 
indexing."); if (!VkRenderFramework::DeviceCanDraw()) { printf("%s GPU-Assisted validation test requires a driver that can draw.\n", kSkipPrefix); return; } #if defined(ANDROID) if (instance() == VK_NULL_HANDLE) { printf("%s Skipping test on Android temporarily while debugging test execution failure.\n", kSkipPrefix); return; } #endif VkValidationFeatureEnableEXT enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT}; VkValidationFeaturesEXT features = {}; features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT; features.enabledValidationFeatureCount = 1; features.pEnabledValidationFeatures = enables; VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, pool_flags, &features)); if (m_device->props.apiVersion < VK_API_VERSION_1_1) { printf("%s GPU-Assisted validation test requires Vulkan 1.1+.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Make a uniform buffer to be passed to the shader that contains the invalid array index. uint32_t qfi = 0; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; bci.size = 1024; bci.queueFamilyIndexCount = 1; bci.pQueueFamilyIndices = &qfi; VkBufferObj buffer0; VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; buffer0.init(*m_device, bci, mem_props); uint32_t *data = (uint32_t *)buffer0.memory().map(); data[0] = 25; buffer0.memory().unmap(); // Prepare descriptors OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 6, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkTextureObj texture(m_device, nullptr); VkSamplerObj sampler(m_device); VkDescriptorBufferInfo buffer_info[1] = {}; buffer_info[0].buffer = buffer0.handle(); buffer_info[0].offset = 0; buffer_info[0].range = sizeof(uint32_t); VkDescriptorImageInfo image_info[6] = {}; for (int i = 0; i < 6; i++) { image_info[i] = texture.DescriptorImageInfo(); image_info[i].sampler = sampler.handle(); image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } VkWriteDescriptorSet descriptor_writes[2] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].dstSet = ds.set_; // descriptor_set; descriptor_writes[0].dstBinding = 0; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = buffer_info; descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[1].dstSet = ds.set_; // descriptor_set; descriptor_writes[1].dstBinding = 1; descriptor_writes[1].descriptorCount = 6; descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_writes[1].pImageInfo = image_info; vkUpdateDescriptorSets(m_device->device(), 2, descriptor_writes, 0, NULL); // Shader programs for array OOB test in vertex stage: // - The vertex shader fetches the invalid index from the uniform buffer and uses it to make an invalid index into another // array. 
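    // The uniform buffer written above contains the value 25, which is out of bounds for the 6-element
    // sampler array bound at binding 1; each test case below expects GPU-assisted validation to report it.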
    char const *vsSource_vert =
        "#version 450\n"
        "\n"
        "layout(std140, set = 0, binding = 0) uniform foo { uint tex_index[1]; } uniform_index_buffer;\n"
        "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n"
        "vec2 vertices[3];\n"
        "void main(){\n"
        " vertices[0] = vec2(-1.0, -1.0);\n"
        " vertices[1] = vec2( 1.0, -1.0);\n"
        " vertices[2] = vec2( 0.0, 1.0);\n"
        " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n"
        " gl_Position += 1e-30 * texture(tex[uniform_index_buffer.tex_index[0]], vec2(0, 0));\n"
        "}\n";
    char const *fsSource_vert =
        "#version 450\n"
        "\n"
        "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n"
        "layout(location = 0) out vec4 uFragColor;\n"
        "void main(){\n"
        " uFragColor = texture(tex[0], vec2(0, 0));\n"
        "}\n";

    // Shader programs for array OOB test in fragment stage:
    // - The vertex shader fetches the invalid index from the uniform buffer and passes it to the fragment shader.
    // - The fragment shader makes the invalid array access.
    char const *vsSource_frag =
        "#version 450\n"
        "\n"
        "layout(std140, binding = 0) uniform foo { uint tex_index[1]; } uniform_index_buffer;\n"
        "layout(location = 0) out flat uint tex_ind;\n"
        "vec2 vertices[3];\n"
        "void main(){\n"
        " vertices[0] = vec2(-1.0, -1.0);\n"
        " vertices[1] = vec2( 1.0, -1.0);\n"
        " vertices[2] = vec2( 0.0, 1.0);\n"
        " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n"
        " tex_ind = uniform_index_buffer.tex_index[0];\n"
        "}\n";
    char const *fsSource_frag =
        "#version 450\n"
        "\n"
        "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n"
        "layout(location = 0) out vec4 uFragColor;\n"
        "layout(location = 0) in flat uint tex_ind;\n"
        "void main(){\n"
        " uFragColor = texture(tex[tex_ind], vec2(0, 0));\n"
        "}\n";

    struct TestCase {
        char const *vertex_source;
        char const *fragment_source;
        bool debug;
        char const *expected_error;
    };

    std::vector<TestCase> tests;
    tests.push_back({vsSource_vert, fsSource_vert, false, "Index of 25 used to index descriptor array of length 6."});
    tests.push_back({vsSource_frag, fsSource_frag, false, "Index of 25 used to index descriptor array of length 6."});
#if !defined(ANDROID)
    // The Android test framework uses shaderc for online compilations. Even when configured to compile with debug info,
    // shaderc seems to drop the OpLine instructions from the shader binary. This causes the following two tests to fail
    // on Android platforms. Skip these tests until the shaderc issue is understood/resolved.
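    // These two cases compile the shaders with debug info so the report can quote the offending source line
    // rather than only the out-of-bounds index.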
tests.push_back({vsSource_vert, fsSource_vert, true, "gl_Position += 1e-30 * texture(tex[uniform_index_buffer.tex_index[0]], vec2(0, 0));"}); tests.push_back({vsSource_frag, fsSource_frag, true, "uFragColor = texture(tex[tex_ind], vec2(0, 0));"}); #endif VkViewport viewport = m_viewports[0]; VkRect2D scissors = m_scissors[0]; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); for (const auto &iter : tests) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.expected_error); VkShaderObj vs(m_device, iter.vertex_source, VK_SHADER_STAGE_VERTEX_BIT, this, "main", iter.debug); VkShaderObj fs(m_device, iter.fragment_source, VK_SHADER_STAGE_FRAGMENT_BIT, this, "main", iter.debug); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkResult err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); } return; } TEST_F(VkLayerTest, InvalidMemoryAliasing) { TEST_DESCRIPTION( "Create a buffer and image, allocate memory, and bind the buffer and image to memory such that they will alias."); VkResult err; bool pass; ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer, buffer2; VkImage image; VkImage image2; VkDeviceMemory mem; // buffer will be bound first VkDeviceMemory mem_img; // image bound first VkMemoryRequirements buff_mem_reqs, img_mem_reqs; VkMemoryRequirements buff_mem_reqs2, img_mem_reqs2; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 256; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &buff_mem_reqs); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; // Image tiling must be optimal to trigger error when aliasing linear buffer image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; 
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &img_mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; // Ensure memory is big enough for both bindings alloc_info.allocationSize = buff_mem_reqs.size + img_mem_reqs.size; pass = m_device->phy().set_memory_type(buff_mem_reqs.memoryTypeBits & img_mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); vkDestroyImage(m_device->device(), image, NULL); vkDestroyImage(m_device->device(), image2, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image2, &img_mem_reqs2); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " is aliased with linear buffer 0x"); // VALIDATION FAILURE due to image mapping overlapping buffer mapping err = vkBindImageMemory(m_device->device(), image, mem, 0); m_errorMonitor->VerifyFound(); // Now correctly bind image2 to second mem allocation before incorrectly // aliasing buffer2 err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer2); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem_img); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image2, mem_img, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is aliased with non-linear image 0x"); vkGetBufferMemoryRequirements(m_device->device(), buffer2, &buff_mem_reqs2); err = vkBindBufferMemory(m_device->device(), buffer2, mem_img, 0); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); vkDestroyBuffer(m_device->device(), buffer2, NULL); vkDestroyImage(m_device->device(), image, NULL); vkDestroyImage(m_device->device(), image2, NULL); vkFreeMemory(m_device->device(), mem, NULL); vkFreeMemory(m_device->device(), mem_img, NULL); } TEST_F(VkLayerTest, InvalidMemoryMapping) { TEST_DESCRIPTION("Attempt to map memory in a number of incorrect ways"); VkResult err; bool pass; ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 256; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; // Ensure memory is big enough for both bindings static const VkDeviceSize allocation_size = 0x10000; 
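    // The 0x10000-byte host-visible allocation below is mapped and flushed in several deliberately invalid
    // ways: zero-size map, double map, ranges that overstep the allocation or the mapped range, unmapping
    // unmapped memory, and flush ranges that are not aligned to nonCoherentAtomSize.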
alloc_info.allocationSize = allocation_size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); uint8_t *pData; // Attempt to map memory size 0 is invalid m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkMapMemory: Attempting to map memory range of size zero"); err = vkMapMemory(m_device->device(), mem, 0, 0, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // Map memory twice err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkMapMemory: Attempting to map memory on an already-mapped object "); err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // Unmap the memory to avoid re-map error vkUnmapMemory(m_device->device(), mem); // overstep allocation with VK_WHOLE_SIZE m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " with size of VK_WHOLE_SIZE oversteps total array size 0x"); err = vkMapMemory(m_device->device(), mem, allocation_size + 1, VK_WHOLE_SIZE, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // overstep allocation w/o VK_WHOLE_SIZE m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " oversteps total array size 0x"); err = vkMapMemory(m_device->device(), mem, 1, allocation_size, 0, (void **)&pData); m_errorMonitor->VerifyFound(); // Now error due to unmapping memory that's not mapped m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Unmapping Memory without memory being mapped: "); vkUnmapMemory(m_device->device(), mem); m_errorMonitor->VerifyFound(); // Now map memory and cause errors due to flushing invalid ranges err = vkMapMemory(m_device->device(), mem, 4 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); VkMappedMemoryRange mmr = {}; mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = atom_size; // Error b/c offset less than offset of mapped mem m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Now flush range that oversteps mapped range vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = atom_size; mmr.size = 4 * atom_size; // Flushing bounds exceed mapped bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Now flush range with VK_WHOLE_SIZE that oversteps offset vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 2 * atom_size, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = atom_size; mmr.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00686"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Some platforms have an atomsize of 1 which makes the test meaningless if (atom_size > 3) { // Now with an offset NOT a 
multiple of the device limit vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = 3; // Not a multiple of atom_size mmr.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-offset-00687"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); // Now with a size NOT a multiple of the device limit vkUnmapMemory(m_device->device(), mem); err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.offset = atom_size; mmr.size = 2 * atom_size + 1; // Not a multiple of atom_size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-01390"); vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); m_errorMonitor->VerifyFound(); } pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } // TODO : If we can get HOST_VISIBLE w/o HOST_COHERENT we can test cases of // kVUID_Core_MemTrack_InvalidMap in validateAndCopyNoncoherentMemoryToDriver() vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, MapMemWithoutHostVisibleBit) { TEST_DESCRIPTION("Allocate memory that is not mappable and then attempt to map it."); VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMapMemory-memory-00682"); ASSERT_NO_FATAL_FAILURE(Init()); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 1024; pass = m_device->phy().set_memory_type(0xFFFFFFFF, &mem_alloc, 0, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { // If we can't find any unmappable memory this test doesn't // make sense printf("%s No unmappable memory types found, skipping test\n", kSkipPrefix); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); void *mappedAddress = NULL; err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, &mappedAddress); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, RebindMemory) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which has already been bound to mem object"); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image, allocate memory, free it, and then try to bind it VkImage image; VkDeviceMemory mem1; VkDeviceMemory mem2; VkMemoryRequirements mem_reqs; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; 
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; // Introduce failure, do NOT set memProps to // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT mem_alloc.memoryTypeIndex = 1; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); // allocate 2 memory objects err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem1); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem2); ASSERT_VK_SUCCESS(err); // Bind first memory object to Image object err = vkBindImageMemory(m_device->device(), image, mem1, 0); ASSERT_VK_SUCCESS(err); // Introduce validation failure, try to bind a different memory object to // the same image object err = vkBindImageMemory(m_device->device(), image, mem2, 0); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), mem1, NULL); vkFreeMemory(m_device->device(), mem2, NULL); } TEST_F(VkLayerTest, QueryMemoryCommitmentWithoutLazyProperty) { TEST_DESCRIPTION("Attempt to query memory commitment on memory without lazy allocation"); ASSERT_NO_FATAL_FAILURE(Init()); auto image_ci = vk_testing::Image::create_info(); image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.format = VK_FORMAT_B8G8R8A8_UNORM; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; vk_testing::Image image; image.init_no_mem(*m_device, image_ci); auto mem_reqs = image.memory_requirements(); // memory_type_index is set to 0 here, but is set properly below auto image_alloc_info = vk_testing::DeviceMemory::alloc_info(mem_reqs.size, 0); bool pass; // the last argument is the "forbid" argument for set_memory_type, disallowing // that particular memory type rather than requiring it pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &image_alloc_info, 0, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); return; } vk_testing::DeviceMemory mem; mem.init(*m_device, image_alloc_info); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetDeviceMemoryCommitment-memory-00690"); VkDeviceSize size; vkGetDeviceMemoryCommitment(m_device->device(), mem.handle(), &size); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SubmitSignaledFence) { vk_testing::Fence testFence; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "submitted in SIGNALED state. 
Fences must be reset before being submitted"); VkFenceCreateInfo fenceInfo = {}; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->end(); testFence.init(*m_device, fenceInfo); VkSubmitInfo submit_info; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.pNext = NULL; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; vkQueueSubmit(m_device->m_queue, 1, &submit_info, testFence.handle()); vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidUsageBits) { TEST_DESCRIPTION( "Specify wrong usage for image then create conflicting view of image Initialize buffer with wrong usage then perform copy " "expecting errors from both the image and the buffer (2 calls)"); ASSERT_NO_FATAL_FAILURE(Init()); auto format = FindSupportedDepthStencilFormat(gpu()); if (!format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); // Initialize image with transfer source usage image.Init(128, 128, 1, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView dsv; VkImageViewCreateInfo dsvci = {}; dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; dsvci.image = image.handle(); dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D; dsvci.format = format; dsvci.subresourceRange.layerCount = 1; dsvci.subresourceRange.baseMipLevel = 0; dsvci.subresourceRange.levelCount = 1; dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; // Create a view with depth / stencil aspect for image with different usage m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid usage flag for Image "); vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv); m_errorMonitor->VerifyFound(); // Initialize buffer with TRANSFER_DST usage VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_dst(*m_device, 128 * 128, reqs); VkBufferImageCopy region = {}; region.bufferRowLength = 128; region.bufferImageHeight = 128; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; region.imageSubresource.layerCount = 1; region.imageExtent.height = 16; region.imageExtent.width = 16; region.imageExtent.depth = 1; // Buffer usage not set to TRANSFER_SRC and image usage not set to TRANSFER_DST m_commandBuffer->begin(); // two separate errors from this call: m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImage-00177"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-srcBuffer-00174"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, LeakAnObject) { VkResult err; TEST_DESCRIPTION("Create a fence and destroy its device without first destroying the fence."); // Note that we have to create a new device since destroying 
the // framework's device causes Teardown() to fail and just calling Teardown // will destroy the errorMonitor. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has not been destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props); // The sacrificial device object VkDevice testDevice; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = queue_info.size(); device_create_info.pQueueCreateInfos = queue_info.data(); device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; device_create_info.pEnabledFeatures = &features; err = vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice); ASSERT_VK_SUCCESS(err); VkFence fence; VkFenceCreateInfo fence_create_info = {}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fence_create_info.pNext = NULL; fence_create_info.flags = 0; err = vkCreateFence(testDevice, &fence_create_info, NULL, &fence); ASSERT_VK_SUCCESS(err); // Induce failure by not calling vkDestroyFence vkDestroyDevice(testDevice, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCommandPoolConsistency) { TEST_DESCRIPTION("Allocate command buffers from one command pool and attempt to delete them from another."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeCommandBuffers is attempting to free Command Buffer"); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandPool command_pool_one; VkCommandPool command_pool_two; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_one); vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_two); VkCommandBuffer cb; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool_one; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &cb); vkFreeCommandBuffers(m_device->device(), command_pool_two, 1, &cb); m_errorMonitor->VerifyFound(); vkDestroyCommandPool(m_device->device(), command_pool_one, NULL); vkDestroyCommandPool(m_device->device(), command_pool_two, NULL); } TEST_F(VkLayerTest, InvalidDescriptorPoolConsistency) { VkResult err; TEST_DESCRIPTION("Allocate descriptor sets from one DS pool and attempt to delete them from another."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeDescriptorSets is attempting to free descriptorSet"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool 
bad_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &bad_pool); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); err = vkFreeDescriptorSets(m_device->device(), bad_pool, 1, &ds.set_); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->device(), bad_pool, NULL); } TEST_F(VkLayerTest, CreateUnknownObject) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageMemoryRequirements-image-parameter"); TEST_DESCRIPTION("Pass an invalid image object handle into a Vulkan API call."); ASSERT_NO_FATAL_FAILURE(Init()); // Pass bogus handle into GetImageMemoryRequirements VkMemoryRequirements mem_reqs; uint64_t fakeImageHandle = 0xCADECADE; VkImage fauxImage = reinterpret_cast<VkImage &>(fakeImageHandle); vkGetImageMemoryRequirements(m_device->device(), fauxImage, &mem_reqs); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, UseObjectWithWrongDevice) { TEST_DESCRIPTION( "Try to destroy a render pass object using a device other than the one it was created on. This should generate a distinct " "error from the invalid handle error."); // Create first device and renderpass ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create second device float priorities[] = {1.0f}; VkDeviceQueueCreateInfo queue_info{}; queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info.pNext = NULL; queue_info.flags = 0; queue_info.queueFamilyIndex = 0; queue_info.queueCount = 1; queue_info.pQueuePriorities = &priorities[0]; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = 1; device_create_info.pQueueCreateInfos = &queue_info; device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; device_create_info.pEnabledFeatures = &features; VkDevice second_device; ASSERT_VK_SUCCESS(vkCreateDevice(gpu(), &device_create_info, NULL, &second_device)); // Try to destroy the renderpass from the first device using the second device m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-parent"); vkDestroyRenderPass(second_device, m_renderPass, NULL); m_errorMonitor->VerifyFound(); vkDestroyDevice(second_device, NULL); } TEST_F(VkLayerTest, PipelineNotBound) { TEST_DESCRIPTION("Pass in an invalid pipeline object handle into a Vulkan API call."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipeline badPipeline = (VkPipeline)((size_t)0xbaadb1be); m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, badPipeline); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, BindImageInvalidMemoryType) { VkResult err; TEST_DESCRIPTION("Test validation check for an invalid memory type index during bind[Buffer|Image]Memory time"); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image, allocate memory, set a bad typeIndex and then try to // bind it VkImage image; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType =
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; // Introduce Failure, select invalid TypeIndex VkPhysicalDeviceMemoryProperties memory_info; vkGetPhysicalDeviceMemoryProperties(gpu(), &memory_info); unsigned int i; for (i = 0; i < memory_info.memoryTypeCount; i++) { if ((mem_reqs.memoryTypeBits & (1 << i)) == 0) { mem_alloc.memoryTypeIndex = i; break; } } if (i >= memory_info.memoryTypeCount) { printf("%s No invalid memory type index could be found; skipped.\n", kSkipPrefix); vkDestroyImage(m_device->device(), image, NULL); return; } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "for this object type are not compatible with the memory"); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, mem, 0); (void)err; m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, BindInvalidMemory) { VkResult err; bool pass; ASSERT_NO_FATAL_FAILURE(Init()); const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM; const int32_t tex_width = 256; const int32_t tex_height = 256; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.pNext = NULL; buffer_create_info.flags = 0; buffer_create_info.size = 4 * 1024 * 1024; buffer_create_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; // Create an image/buffer, allocate memory, free it, and then try to bind it { VkImage image = VK_NULL_HANDLE; VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_mem_alloc = 
{}, buffer_mem_alloc = {}; image_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_mem_alloc.allocationSize = image_mem_reqs.size; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_mem_alloc, 0); ASSERT_TRUE(pass); buffer_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_mem_alloc.allocationSize = buffer_mem_reqs.size; pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_mem_alloc, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem = VK_NULL_HANDLE, buffer_mem = VK_NULL_HANDLE; err = vkAllocateMemory(device(), &image_mem_alloc, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_mem_alloc, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-parameter"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-parameter"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } // Try to bind memory to an object that already has a memory binding { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.allocationSize = image_mem_reqs.size; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0); ASSERT_TRUE(pass); pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem, buffer_mem; err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(device(), image, image_mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01044"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01029"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. 
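// Note: non-sparse images and buffers may be bound to device memory at most once; the repeat vkBindImageMemory / vkBindBufferMemory calls above are exactly what the 01044 / 01029 VUIDs guard against.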
m_errorMonitor->VerifyFound(); vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); vkDestroyImage(device(), image, NULL); vkDestroyBuffer(device(), buffer, NULL); } // Try to bind memory to an object with an invalid memoryOffset { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; // Leave some extra space for alignment wiggle room image_alloc_info.allocationSize = image_mem_reqs.size + image_mem_reqs.alignment; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size + buffer_mem_reqs.alignment; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0); ASSERT_TRUE(pass); pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem, buffer_mem; err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); // Test unaligned memory offset { if (image_mem_reqs.alignment > 1) { VkDeviceSize image_offset = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01048"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } if (buffer_mem_reqs.alignment > 1) { VkDeviceSize buffer_offset = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01036"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } } // Test memory offsets outside the memory allocation { VkDeviceSize image_offset = (image_alloc_info.allocationSize + image_mem_reqs.alignment) & ~(image_mem_reqs.alignment - 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01046"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); VkDeviceSize buffer_offset = (buffer_alloc_info.allocationSize + buffer_mem_reqs.alignment) & ~(buffer_mem_reqs.alignment - 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01031"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } // Test memory offsets within the memory allocation, but which leave too little memory for // the resource. 
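// (Each check below only runs when its computed, aligned offset turns out to be non-zero; otherwise that sub-case is skipped.)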
{ VkDeviceSize image_offset = (image_mem_reqs.size - 1) & ~(image_mem_reqs.alignment - 1); if ((image_offset > 0) && (image_mem_reqs.size < (image_alloc_info.allocationSize - image_mem_reqs.alignment))) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-size-01049"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } VkDeviceSize buffer_offset = (buffer_mem_reqs.size - 1) & ~(buffer_mem_reqs.alignment - 1); if (buffer_offset > 0) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-size-01037"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } } vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); vkDestroyImage(device(), image, NULL); vkDestroyBuffer(device(), buffer, NULL); } // Try to bind memory to an object with an invalid memory type { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.allocationSize = image_mem_reqs.size; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size; // Create a mask of available memory types *not* supported by these resources, // and try to use one of them. VkPhysicalDeviceMemoryProperties memory_properties = {}; vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memory_properties); VkDeviceMemory image_mem, buffer_mem; uint32_t image_unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~image_mem_reqs.memoryTypeBits; if (image_unsupported_mem_type_bits != 0) { pass = m_device->phy().set_memory_type(image_unsupported_mem_type_bits, &image_alloc_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01047"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); vkFreeMemory(device(), image_mem, NULL); } uint32_t buffer_unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~buffer_mem_reqs.memoryTypeBits; if (buffer_unsupported_mem_type_bits != 0) { pass = m_device->phy().set_memory_type(buffer_unsupported_mem_type_bits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01035"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. 
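// As with the image above, the memory was deliberately allocated from a type excluded from the buffer's memoryTypeBits, so the bind should be rejected.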
m_errorMonitor->VerifyFound(); vkFreeMemory(device(), buffer_mem, NULL); } vkDestroyImage(device(), image, NULL); vkDestroyBuffer(device(), buffer, NULL); } // Try to bind memory to an image created with sparse memory flags { VkImageCreateInfo sparse_image_create_info = image_create_info; sparse_image_create_info.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT; VkImageFormatProperties image_format_properties = {}; err = vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), sparse_image_create_info.format, sparse_image_create_info.imageType, sparse_image_create_info.tiling, sparse_image_create_info.usage, sparse_image_create_info.flags, &image_format_properties); if (!m_device->phy().features().sparseResidencyImage2D || err == VK_ERROR_FORMAT_NOT_SUPPORTED) { // most likely means sparse formats aren't supported here; skip this test. } else { ASSERT_VK_SUCCESS(err); if (image_format_properties.maxExtent.width == 0) { printf("%s Sparse image format not supported; skipped.\n", kSkipPrefix); return; } else { VkImage sparse_image = VK_NULL_HANDLE; err = vkCreateImage(m_device->device(), &sparse_image_create_info, NULL, &sparse_image); ASSERT_VK_SUCCESS(err); VkMemoryRequirements sparse_mem_reqs = {}; vkGetImageMemoryRequirements(m_device->device(), sparse_image, &sparse_mem_reqs); if (sparse_mem_reqs.memoryTypeBits != 0) { VkMemoryAllocateInfo sparse_mem_alloc = {}; sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; sparse_mem_alloc.pNext = NULL; sparse_mem_alloc.allocationSize = sparse_mem_reqs.size; sparse_mem_alloc.memoryTypeIndex = 0; pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0); ASSERT_TRUE(pass); VkDeviceMemory sparse_mem = VK_NULL_HANDLE; err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01045"); err = vkBindImageMemory(m_device->device(), sparse_image, sparse_mem, 0); // This may very well return an error. (void)err; m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), sparse_mem, NULL); } vkDestroyImage(m_device->device(), sparse_image, NULL); } } } // Try to bind memory to a buffer created with sparse memory flags { VkBufferCreateInfo sparse_buffer_create_info = buffer_create_info; sparse_buffer_create_info.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT; if (!m_device->phy().features().sparseResidencyBuffer) { // most likely means sparse formats aren't supported here; skip this test. 
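// (The sparseResidencyBuffer feature is unavailable, so this sub-case is skipped without reporting.)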
} else { VkBuffer sparse_buffer = VK_NULL_HANDLE; err = vkCreateBuffer(m_device->device(), &sparse_buffer_create_info, NULL, &sparse_buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements sparse_mem_reqs = {}; vkGetBufferMemoryRequirements(m_device->device(), sparse_buffer, &sparse_mem_reqs); if (sparse_mem_reqs.memoryTypeBits != 0) { VkMemoryAllocateInfo sparse_mem_alloc = {}; sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; sparse_mem_alloc.pNext = NULL; sparse_mem_alloc.allocationSize = sparse_mem_reqs.size; sparse_mem_alloc.memoryTypeIndex = 0; pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0); ASSERT_TRUE(pass); VkDeviceMemory sparse_mem = VK_NULL_HANDLE; err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01030"); err = vkBindBufferMemory(m_device->device(), sparse_buffer, sparse_mem, 0); // This may very well return an error. (void)err; m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), sparse_mem, NULL); } vkDestroyBuffer(m_device->device(), sparse_buffer, NULL); } } } TEST_F(VkLayerTest, BindMemoryToDestroyedObject) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image object, allocate memory, destroy the object and then try // to bind it VkImage image; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); // Allocate memory err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); // Introduce validation failure, destroy Image object before binding vkDestroyImage(m_device->device(), image, NULL); ASSERT_VK_SUCCESS(err); // Now Try to bind memory to this destroyed object err = vkBindImageMemory(m_device->device(), image, mem, 0); // This may very well return an error. 
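// The layer should flag the stale image handle (VUID-vkBindImageMemory-image-parameter) regardless of what the driver returns.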
(void)err; m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, ExceedMemoryAllocationCount) { VkResult err = VK_SUCCESS; const int max_mems = 32; VkDeviceMemory mems[max_mems + 1]; if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = (PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT"); PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT = (PFN_vkGetOriginalPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceLimitsEXT"); if (!(fpvkSetPhysicalDeviceLimitsEXT) || !(fpvkGetOriginalPhysicalDeviceLimitsEXT)) { printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix); return; } VkPhysicalDeviceProperties props; fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits); if (props.limits.maxMemoryAllocationCount > max_mems) { props.limits.maxMemoryAllocationCount = max_mems; fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits); } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Number of currently valid memory objects is not less than the maximum allowed"); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.memoryTypeIndex = 0; mem_alloc.allocationSize = 4; int i; for (i = 0; i <= max_mems; i++) { err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mems[i]); if (err != VK_SUCCESS) { break; } } m_errorMonitor->VerifyFound(); for (int j = 0; j < i; j++) { vkFreeMemory(m_device->device(), mems[j], NULL); } } TEST_F(VkLayerTest, CreatePipelineBadVertexAttributeFormat) { TEST_DESCRIPTION("Test that pipeline validation catches invalid vertex attribute formats"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs; memset(&input_attribs, 0, sizeof(input_attribs)); // Pick a really bad format for this purpose and make sure it should fail input_attribs.format = VK_FORMAT_BC2_UNORM_BLOCK; VkFormatProperties format_props = m_device->format_properties(input_attribs.format); if ((format_props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0) { printf("%s Format unsuitable for test; skipped.\n", kSkipPrefix); return; } input_attribs.location = 0; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-format-00623"); VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attribs, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); 
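// Pipeline creation should be flagged because VK_FORMAT_BC2_UNORM_BLOCK (checked above to lack VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) is not a legal vertex attribute format.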
m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageSampleCounts) { TEST_DESCRIPTION("Use bad sample counts in image transfer calls to trigger validation errors."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkMemoryPropertyFlags reqs = 0; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 256; image_create_info.extent.height = 256; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.flags = 0; VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {256, 256, 1}; blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {128, 128, 1}; // Create two images, the source with sampleCount = 4, and attempt to blit // between them { image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&image_create_info); src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); // TODO: These 2 VUs are redundant - expect one of them to go away m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } // Create two images, the dest with sampleCount = 4, and attempt to blit // between them { image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&image_create_info); src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); // TODO: These 2 VUs are redundant - expect one of them to go away m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234"); 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } VkBufferImageCopy copy_region = {}; copy_region.bufferRowLength = 128; copy_region.bufferImageHeight = 128; copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.imageSubresource.layerCount = 1; copy_region.imageExtent.height = 64; copy_region.imageExtent.width = 64; copy_region.imageExtent.depth = 1; // Create src buffer and dst image with sampleCount = 4 and attempt to copy // buffer to image { VkBufferObj src_buffer; src_buffer.init_as_src(*m_device, 128 * 128 * 4, reqs); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), src_buffer.handle(), dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ©_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } // Create dst buffer and src image with sampleCount = 4 and attempt to copy // image to buffer { VkBufferObj dst_buffer; dst_buffer.init_as_dst(*m_device, 128 * 128 * 4, reqs); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; vk_testing::Image src_image; src_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer.handle(), 1, ©_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } } TEST_F(VkLayerTest, BlitImageFormatTypes) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat f_unsigned = VK_FORMAT_R8G8B8A8_UINT; VkFormat f_signed = VK_FORMAT_R8G8B8A8_SINT; VkFormat f_float = VK_FORMAT_R32_SFLOAT; VkFormat f_depth = VK_FORMAT_D32_SFLOAT_S8_UINT; VkFormat f_depth2 = VK_FORMAT_D32_SFLOAT; if (!ImageFormatIsSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL)) { printf("%s Requested formats not supported - BlitImageFormatTypes skipped.\n", kSkipPrefix); return; } // Note any missing feature bits bool usrc = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool udst = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool ssrc = !ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool sdst = 
!ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool fsrc = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool fdst = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool d1dst = !ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool d2src = !ImageFormatAndFeaturesSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); VkImageObj unsigned_image(m_device); unsigned_image.Init(64, 64, 1, f_unsigned, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(unsigned_image.initialized()); unsigned_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj signed_image(m_device); signed_image.Init(64, 64, 1, f_signed, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(signed_image.initialized()); signed_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj float_image(m_device); float_image.Init(64, 64, 1, f_float, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(float_image.initialized()); float_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj depth_image(m_device); depth_image.Init(64, 64, 1, f_depth, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(depth_image.initialized()); depth_image.SetLayout(VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj depth_image2(m_device); depth_image2.Init(64, 64, 1, f_depth2, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(depth_image2.initialized()); depth_image2.SetLayout(VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {64, 64, 1}; blitRegion.dstOffsets[0] = {0, 0, 0}; blitRegion.dstOffsets[1] = {32, 32, 1}; m_commandBuffer->begin(); // Unsigned int vs not an int m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), float_image.image(), float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), unsigned_image.image(), unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Signed int vs not an int, m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), float_image.image(), float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), signed_image.image(), signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Signed vs Unsigned int - generates both VUs m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), unsigned_image.image(), unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), signed_image.image(), signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Depth vs any non-identical depth format m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00231"); blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; if (d2src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-01999"); if (d1dst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), depth_image2.image(), depth_image2.Layout(), depth_image.image(), depth_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitImageFilters) { bool cubic_support = false; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, 
m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) { m_device_extension_names.push_back("VK_IMG_filter_cubic"); cubic_support = true; } ASSERT_NO_FATAL_FAILURE(InitState()); VkFormat fmt = VK_FORMAT_R8_UINT; if (!ImageFormatIsSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL)) { printf("%s No R8_UINT format support - BlitImageFilters skipped.\n", kSkipPrefix); return; } // Create 2D images VkImageObj src2D(m_device); VkImageObj dst2D(m_device); src2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); dst2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(src2D.initialized()); ASSERT_TRUE(dst2D.initialized()); src2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); dst2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); // Create 3D image VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_3D; ci.format = fmt; ci.extent = {64, 64, 4}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj src3D(m_device); src3D.init(&ci); ASSERT_TRUE(src3D.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {48, 48, 1}; blitRegion.dstOffsets[0] = {0, 0, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // UINT format should not support linear filtering, but check to be sure if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-02001"); vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); } if (cubic_support && !ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG)) { // Invalid filter CUBIC_IMG m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-02002"); vkCmdBlitImage(m_commandBuffer->handle(), src3D.image(), src3D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_CUBIC_IMG); m_errorMonitor->VerifyFound(); // Invalid filter CUBIC_IMG + invalid 2D source image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-02002"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00237"); vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_CUBIC_IMG); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitImageLayout) { TEST_DESCRIPTION("Incorrect vkCmdBlitImage layouts"); 
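// vkCmdBlitImage expects the source image to be in VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL and the destination in VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL; the cases below violate either the declared layout parameter or the image's actual layout.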
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkResult err; VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Create images VkImageObj img_src_transfer(m_device); VkImageObj img_dst_transfer(m_device); VkImageObj img_general(m_device); VkImageObj img_color(m_device); img_src_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_dst_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_general.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_color.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(img_src_transfer.initialized()); ASSERT_TRUE(img_dst_transfer.initialized()); ASSERT_TRUE(img_general.initialized()); ASSERT_TRUE(img_color.initialized()); img_src_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); img_dst_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); img_general.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); img_color.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {48, 48, 1}; blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Illegal srcImageLayout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00222"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); // Illegal destImageLayout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00227"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_commandBuffer->reset(0); m_commandBuffer->begin(); // Source image in invalid layout at start of the CB m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_color.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); 
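// The mismatch between the layout recorded here and the image's actual layout is only detected when the command buffer is submitted below.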
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_commandBuffer->reset(0); m_commandBuffer->begin(); // Destination image in invalid layout at start of the CB m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout"); vkCmdBlitImage(m_commandBuffer->handle(), img_color.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); // Source image in invalid layout in the middle of CB m_commandBuffer->reset(0); m_commandBuffer->begin(); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = nullptr; img_barrier.srcAccessMask = 0; img_barrier.dstAccessMask = 0; img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.image = img_general.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00221"); vkCmdBlitImage(m_commandBuffer->handle(), img_general.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); // Destination image in invalid layout in the middle of CB m_commandBuffer->reset(0); m_commandBuffer->begin(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; img_barrier.image = img_dst_transfer.handle(); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00226"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); } TEST_F(VkLayerTest, BlitImageOffsets) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM; if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s No blit feature bits - BlitImageOffsets skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType 
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = fmt; ci.extent = {64, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {64, 64, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {64, 64, 64}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; m_commandBuffer->begin(); // 1D, with src/dest y offsets other than (0,1) blit_region.srcOffsets[0] = {0, 1, 0}; blit_region.srcOffsets[1] = {30, 1, 1}; blit_region.dstOffsets[0] = {32, 0, 0}; blit_region.dstOffsets[1] = {64, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00245"); vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[0] = {32, 1, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00250"); vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // 2D, with src/dest z offsets other than (0,1) blit_region.srcOffsets[0] = {0, 0, 1}; blit_region.srcOffsets[1] = {24, 31, 1}; blit_region.dstOffsets[0] = {32, 32, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00247"); vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[0] = {32, 32, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00252"); vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Source offsets exceeding source image dimensions blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {65, 64, 1}; // src x blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00243"); // x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), 
image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[1] = {64, 65, 1}; // src y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00244"); // y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 65}; // src z blit_region.srcOffsets[1] = {64, 64, 64}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00246"); // z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Dest offsets exceeding destination image dimensions blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {64, 64, 1}; blit_region.dstOffsets[0] = {96, 64, 32}; // dst x blit_region.dstOffsets[1] = {64, 0, 33}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00248"); // x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.dstOffsets[0] = {0, 65, 32}; // dst y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00249"); // y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.dstOffsets[0] = {0, 64, 65}; // dst z blit_region.dstOffsets[1] = {64, 0, 64}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00251"); // z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, MiscBlitImageTests) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat f_color = VK_FORMAT_R32_SFLOAT; // Need features ..BLIT_SRC_BIT & ..BLIT_DST_BIT if (!ImageFormatAndFeaturesSupported(gpu(), f_color, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s Requested format features unavailable - MiscBlitImageTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = f_color; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode =
VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // 2D color image VkImageObj color_img(m_device); color_img.init(&ci); ASSERT_TRUE(color_img.initialized()); // 2D multi-sample image ci.samples = VK_SAMPLE_COUNT_4_BIT; VkImageObj ms_img(m_device); ms_img.init(&ci); ASSERT_TRUE(ms_img.initialized()); // 3D color image ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {64, 64, 8}; VkImageObj color_3D_img(m_device); color_3D_img.init(&ci); ASSERT_TRUE(color_3D_img.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {16, 16, 1}; blitRegion.dstOffsets[0] = {32, 32, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Blit with aspectMask errors blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00241"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00242"); vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid src mip level blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.mipLevel = ci.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01705"); // invalid srcSubresource.mipLevel // Redundant unavoidable errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00243"); // out-of-bounds srcOffset.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00244"); // out-of-bounds srcOffset.y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00246"); // out-of-bounds srcOffset.z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // region not contained within src image vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid dst mip level blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.mipLevel = ci.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstSubresource-01706"); // invalid dstSubresource.mipLevel // Redundant unavoidable errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00248"); // out-of-bounds dstOffset.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00249"); // out-of-bounds dstOffset.y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-VkImageBlit-dstOffset-00251"); // out-of-bounds dstOffset.z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // region not contained within dst image vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid src array layer blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcSubresource.baseArrayLayer = ci.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01707"); // invalid srcSubresource layer range vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid dst array layer blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.baseArrayLayer = ci.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstSubresource-01708"); // invalid dstSubresource layer range // Redundant unavoidable errors vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blitRegion.dstSubresource.baseArrayLayer = 0; // Blit multi-sample image // TODO: redundant VUs, one (1c8) or two (1d2 & 1d4) should be eliminated. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234"); vkCmdBlitImage(m_commandBuffer->handle(), ms_img.image(), ms_img.Layout(), ms_img.image(), ms_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit 3D with baseArrayLayer != 0 or layerCount != 1 blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01707"); // base+count > total layer count vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(), color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-layerCount-01700"); // layer count == 0 (src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-layerCount-00239"); // src/dst layer count mismatch vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(), color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitToDepthImageTests) { ASSERT_NO_FATAL_FAILURE(Init()); // Need feature ..BLIT_SRC_BIT but not ..BLIT_DST_BIT // TODO: 
provide more choices here; supporting D32_SFLOAT as BLIT_DST isn't unheard of. VkFormat f_depth = VK_FORMAT_D32_SFLOAT; if (!ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT) || ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s Requested format features unavailable - BlitToDepthImageTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = f_depth; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // 2D depth image VkImageObj depth_img(m_device); depth_img.init(&ci); ASSERT_TRUE(depth_img.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {16, 16, 1}; blitRegion.dstOffsets[0] = {32, 32, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Blit depth image - has SRC_BIT but not DST_BIT blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-02000"); vkCmdBlitImage(m_commandBuffer->handle(), depth_img.image(), depth_img.Layout(), depth_img.image(), depth_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, MinImageTransferGranularity) { TEST_DESCRIPTION("Tests for validation of Queue Family property minImageTransferGranularity."); ASSERT_NO_FATAL_FAILURE(Init()); auto queue_family_properties = m_device->phy().queue_properties(); auto large_granularity_family = std::find_if(queue_family_properties.begin(), queue_family_properties.end(), [](VkQueueFamilyProperties family_properties) { VkExtent3D family_granularity = family_properties.minImageTransferGranularity; // We need a queue family that supports copy operations and has a large enough minImageTransferGranularity for the tests // below to make sense. 
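// Note: requiring a granularity of at least 4 in each dimension guarantees that the deliberately
// misaligned offset/extent value of 3 used later in this test can never be a multiple of the
// granularity, so the expected granularity errors are certain to fire.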
return (family_properties.queueFlags & VK_QUEUE_TRANSFER_BIT || family_properties.queueFlags & VK_QUEUE_GRAPHICS_BIT || family_properties.queueFlags & VK_QUEUE_COMPUTE_BIT) && family_granularity.depth >= 4 && family_granularity.width >= 4 && family_granularity.height >= 4; }); if (large_granularity_family == queue_family_properties.end()) { printf("%s No queue family has a large enough granularity for this test to be meaningful, skipping test\n", kSkipPrefix); return; } const size_t queue_family_index = std::distance(queue_family_properties.begin(), large_granularity_family); VkExtent3D granularity = queue_family_properties[queue_family_index].minImageTransferGranularity; VkCommandPoolObj command_pool(m_device, queue_family_index, 0); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_3D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = granularity.width * 2; image_create_info.extent.height = granularity.height * 2; image_create_info.extent.depth = granularity.depth * 2; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = 0; VkImageObj src_image_obj(m_device); src_image_obj.init(&image_create_info); ASSERT_TRUE(src_image_obj.initialized()); srcImage = src_image_obj.handle(); image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image_obj(m_device); dst_image_obj.init(&image_create_info); ASSERT_TRUE(dst_image_obj.initialized()); dstImage = dst_image_obj.handle(); VkCommandBufferObj command_buffer(m_device, &command_pool); ASSERT_TRUE(command_buffer.initialized()); command_buffer.begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset.x = 0; copyRegion.srcOffset.y = 0; copyRegion.srcOffset.z = 0; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset.x = 0; copyRegion.dstOffset.y = 0; copyRegion.dstOffset.z = 0; copyRegion.extent.width = granularity.width; copyRegion.extent.height = granularity.height; copyRegion.extent.depth = granularity.depth; // Introduce failure by setting srcOffset to a bad granularity value copyRegion.srcOffset.y = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Introduce failure by setting extent to a granularity value that is bad // for both the source and destination image.
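// For reference, a sketch (not exercised by this test): a conforming region keeps offsets at
// integer multiples of minImageTransferGranularity, and extents at multiples of it as well unless
// the extent reaches the edge of the subresource, e.g.
//     copyRegion.srcOffset = {0, 0, 0};
//     copyRegion.extent = {granularity.width, granularity.height, granularity.depth};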
copyRegion.srcOffset.y = 0; copyRegion.extent.width = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Now do some buffer/image copies VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_src_and_dst(*m_device, 8 * granularity.height * granularity.width * granularity.depth, reqs); VkBufferImageCopy region = {}; region.bufferOffset = 0; region.bufferRowLength = 0; region.bufferImageHeight = 0; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageExtent.height = granularity.height; region.imageExtent.width = granularity.width; region.imageExtent.depth = granularity.depth; region.imageOffset.x = 0; region.imageOffset.y = 0; region.imageOffset.z = 0; // Introduce failure by setting imageExtent to a bad granularity value region.imageExtent.width = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity vkCmdCopyImageToBuffer(command_buffer.handle(), srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.width = granularity.width; // Introduce failure by setting imageOffset to a bad granularity value region.imageOffset.z = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity vkCmdCopyBufferToImage(command_buffer.handle(), buffer.handle(), dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); command_buffer.end(); } TEST_F(VkLayerTest, MismatchedQueueFamiliesOnSubmit) { TEST_DESCRIPTION( "Submit a command buffer created using one queue family and attempt to submit it on a queue created in a different queue " "family."); ASSERT_NO_FATAL_FAILURE(Init()); // assumes it initializes all queue families on vkCreateDevice // This test is meaningless unless we have multiple queue families auto queue_family_properties = m_device->phy().queue_properties(); std::vector<uint32_t> queue_families; for (uint32_t i = 0; i < queue_family_properties.size(); ++i) if (queue_family_properties[i].queueCount > 0) queue_families.push_back(i); if (queue_families.size() < 2) { printf("%s Device only has one queue family; skipped.\n", kSkipPrefix); return; } const uint32_t queue_family = queue_families[0]; const uint32_t other_queue_family = queue_families[1]; VkQueue other_queue; vkGetDeviceQueue(m_device->device(), other_queue_family, 0, &other_queue); VkCommandPoolObj cmd_pool(m_device, queue_family); VkCommandBufferObj cmd_buff(m_device, &cmd_pool); cmd_buff.begin(); cmd_buff.end(); // Submit on the wrong queue VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buff.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00074"); vkQueueSubmit(other_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DrawWithPipelineIncompatibleWithSubpass) { TEST_DESCRIPTION("Use a pipeline
for the wrong subpass in a render pass instance"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with two subpasses, both writing the same attachment. VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 1, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 2, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); char const *vsSource = "#version 450\n" "void main() { gl_Position = vec4(1); }\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(1); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); const VkPipelineLayoutObj pl(m_device); pipe.CreateVKPipeline(pl.handle(), rp); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; // subtest 1: bind in the wrong subpass vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); // subtest 2: bind in correct subpass, then transition to next subpass vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); vkCmdDraw(m_commandBuffer->handle(), 3, 
1, 0, 0); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, ImageBarrierSubpassConflicts) { TEST_DESCRIPTION("Add a pipeline barrier within a subpass that has conflicting state"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkRenderPass rp_noselfdep; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); rpci.dependencyCount = 0; rpci.pDependencies = nullptr; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp_noselfdep); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp_noselfdep, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkMemoryBarrier mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.pNext = NULL; mem_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; mem_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); rpbi.renderPass = rp; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = 
VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; // Mis-match src stage mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Now mis-match dst stage mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_HOST_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Set srcQueueFamilyIndex to something other than IGNORED img_barrier.srcQueueFamilyIndex = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // Mis-match mem barrier src access mask mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; mem_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); // Mis-match mem barrier dst access mask. 
Also set srcAccessMask to 0 which should not cause an error mem_barrier.srcAccessMask = 0; mem_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); // Mis-match image barrier src access mask img_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Mis-match image barrier dst access mask img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Mis-match dependencyFlags img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-02285"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0 /* wrong */, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Send non-zero bufferMemoryBarrierCount // Construct a valid BufferMemoryBarrier to avoid any parameter errors // First we need a valid buffer to reference VkBufferObj buffer; VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; buffer.init_as_src_and_dst(*m_device, 256, mem_reqs); VkBufferMemoryBarrier bmb = {}; bmb.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; bmb.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; bmb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; bmb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bmb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bmb.buffer = buffer.handle(); bmb.offset = 0; bmb.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &bmb, 0, nullptr); m_errorMonitor->VerifyFound(); // Add image barrier w/ image handle that's not in framebuffer VkImageObj lone_image(m_device); lone_image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_barrier.image = 
lone_image.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-02635"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Have image barrier with mis-matched layouts img_barrier.image = image.handle(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-01181"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-02636"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyRenderPass(m_device->device(), rp_noselfdep, nullptr); } TEST_F(VkLayerTest, InvalidSecondaryCommandBufferBarrier) { TEST_DESCRIPTION("Add an invalid image barrier in a secondary command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); // Second image that img_barrier will incorrectly use VkImageObj image2(m_device); image2.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; 
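// Note: the inheritance info used below deliberately passes VK_NULL_HANDLE for the framebuffer, so
// the image barrier recorded in the secondary command buffer cannot be checked against the bound
// attachments at record time; the mismatch (the barrier names image2, which is not in the
// framebuffer) is instead expected to be reported when the secondary is executed inside this
// render pass instance via vkCmdExecuteCommands().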
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, rp, 0, VK_NULL_HANDLE, // Set to NULL FB handle intentionally to flesh out any errors VK_FALSE, 0, 0}; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, &cbii}; vkBeginCommandBuffer(secondary.handle(), &cbbi); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image2.handle(); // Image mis-matches with FB image img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); secondary.end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-02635"); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, ImageBarrierSubpassConflict) { TEST_DESCRIPTION("Check case where subpass index references different image from image barrier"); ASSERT_NO_FATAL_FAILURE(Init()); // Create RP/FB combo where subpass has incorrect index attachment, this is 2nd half of "VUID-vkCmdPipelineBarrier-image-02635" VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; // ref attachment points to wrong attachment index compared to img_barrier below VkAttachmentReference ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attach, 1, 
subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkImageObj image2(m_device); image2.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView2 = image2.targetView(VK_FORMAT_R8G8B8A8_UNORM); // re-use imageView from start of test VkImageView iv_array[2] = {imageView, imageView2}; VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, iv_array, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); /* barrier references image from attachment index 0 */ img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-02635"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, TemporaryExternalSemaphore) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR; #else const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external semaphore instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external semaphore device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME); 
} else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external semaphore import and export capability VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr, handle_type}; VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp); if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) || !(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; // Create a semaphore to export payload from VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type}; VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0}; VkSemaphore export_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore); ASSERT_VK_SUCCESS(err); // Create a semaphore to import payload into sci.pNext = nullptr; VkSemaphore import_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore); ASSERT_VK_SUCCESS(err); #ifdef _WIN32 // Export semaphore payload to an opaque handle HANDLE handle = nullptr; VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR"); err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above *temporarily* VkImportSemaphoreWin32HandleInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, nullptr, import_semaphore, VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, handle, nullptr}; auto vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR"); err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #else // Export semaphore payload to an opaque handle int fd = 0; VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR"); err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above *temporarily* VkImportSemaphoreFdInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore, VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd}; auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR"); err = vkImportSemaphoreFdKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #endif // Wait on the imported semaphore twice in vkQueueSubmit, the second wait should be an error VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo si[] = { 
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, }; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled"); vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Wait on the imported semaphore twice in vkQueueBindSparse, the second wait should be an error VkBindSparseInfo bi[] = { {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, }; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled"); vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroySemaphore(m_device->device(), export_semaphore, nullptr); vkDestroySemaphore(m_device->device(), import_semaphore, nullptr); } TEST_F(VkLayerTest, TemporaryExternalFence) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto extension_name = VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external fence instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external fence device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external fence import and export capability VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type}; VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR"); vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp); if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) || !(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External fence does not support importing and 
exporting, skipping test\n", kSkipPrefix); return; } VkResult err; // Create a fence to export payload from VkFence export_fence; { VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type}; VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence); ASSERT_VK_SUCCESS(err); } // Create a fence to import payload into VkFence import_fence; { VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence); ASSERT_VK_SUCCESS(err); } #ifdef _WIN32 // Export fence payload to an opaque handle HANDLE handle = nullptr; { VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR"); err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceWin32HandleInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, nullptr, import_fence, VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, handle, nullptr}; auto vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR"); err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #else // Export fence payload to an opaque handle int fd = 0; { VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR"); err = vkGetFenceFdKHR(m_device->device(), &gfi, &fd); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence, VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd}; auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR"); err = vkImportFenceFdKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #endif // Undo the temporary import vkResetFences(m_device->device(), 1, &import_fence); // Signal the previously imported fence twice, the second signal should produce a validation error vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is already in use by another submission."); vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); m_errorMonitor->VerifyFound(); // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), export_fence, nullptr); vkDestroyFence(m_device->device(), import_fence, nullptr); } TEST_F(VkPositiveLayerTest, SecondaryCommandBufferBarrier) { TEST_DESCRIPTION("Add a pipeline barrier in a secondary command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] 
= { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, rp, 0, VK_NULL_HANDLE, // Set to NULL FB handle intentionally to flesh out any errors VK_FALSE, 0, 0}; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, &cbii}; vkBeginCommandBuffer(secondary.handle(), &cbbi); VkMemoryBarrier mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.pNext = NULL; mem_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; mem_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = 
&m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } static void TestRenderPassCreate(ErrorMonitor *error_monitor, const VkDevice device, const VkRenderPassCreateInfo *create_info, bool rp2Supported, const char *rp1_vuid, const char *rp2_vuid) { VkRenderPass render_pass = VK_NULL_HANDLE; VkResult err; if (rp1_vuid) { error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp1_vuid); err = vkCreateRenderPass(device, create_info, nullptr, &render_pass); if (err == VK_SUCCESS) vkDestroyRenderPass(device, render_pass, nullptr); error_monitor->VerifyFound(); } if (rp2Supported && rp2_vuid) { PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(device, "vkCreateRenderPass2KHR"); safe_VkRenderPassCreateInfo2KHR create_info2; ConvertVkRenderPassCreateInfoToV2KHR(create_info, &create_info2); error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp2_vuid); err = vkCreateRenderPass2KHR(device, create_info2.ptr(), nullptr, &render_pass); if (err == VK_SUCCESS) vkDestroyRenderPass(device, render_pass, nullptr); error_monitor->VerifyFound(); } } TEST_F(VkLayerTest, RenderPassCreateAttachmentIndexOutOfRange) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); // There are no attachments, but refer to attachment 0. VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; // "... must be less than the total number of attachments ..." 
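// TestRenderPassCreate() above exercises both creation paths: it expects rp1_vuid from
// vkCreateRenderPass and, when VK_KHR_create_renderpass2 is available, converts the create info
// with ConvertVkRenderPassCreateInfoToV2KHR() and expects rp2_vuid from vkCreateRenderPass2KHR.
// Passing nullptr for either VUID skips that path.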
TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-attachment-00834", "VUID-VkRenderPassCreateInfo2KHR-attachment-03051"); } TEST_F(VkLayerTest, RenderPassCreateAttachmentReadOnlyButCleared) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); bool maintenance2Supported = rp2Supported; // Check for VK_KHR_maintenance2 if (!rp2Supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); maintenance2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState()); if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { maintenance2Supported = true; } VkAttachmentDescription description = {0, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0, nullptr}; // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL but depth cleared TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pAttachments-00836", "VUID-VkRenderPassCreateInfo2KHR-pAttachments-02522"); if (maintenance2Supported) { // VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL but depth cleared depth_stencil_ref.layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pAttachments-01566", nullptr); // VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL but depth cleared depth_stencil_ref.layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pAttachments-01567", nullptr); } } TEST_F(VkLayerTest, RenderPassCreateAttachmentMismatchingLayoutsColor) { TEST_DESCRIPTION("Attachment is used simultaneously as two color attachments with different layouts."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0,
VK_IMAGE_LAYOUT_GENERAL}, }; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 2, refs, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "subpass 0 already uses attachment 0 with a different image layout", "subpass 0 already uses attachment 0 with a different image layout"); } TEST_F(VkLayerTest, RenderPassCreateAttachmentDescriptionInvalidFinalLayout) { TEST_DESCRIPTION("VkAttachmentDescription's finalLayout must not be UNDEFINED or PREINITIALIZED"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkAttachmentReference attach_ref = {}; attach_ref.attachment = 0; attach_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach_ref; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-finalLayout-00843", "VUID-VkAttachmentDescription2KHR-finalLayout-03061"); attach_desc.finalLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentDescription-finalLayout-00843", "VUID-VkAttachmentDescription2KHR-finalLayout-03061"); } TEST_F(VkLayerTest, RenderPassCreateAttachmentsMisc) { TEST_DESCRIPTION( "Ensure that CreateRenderPass produces the expected validation errors when a subpass's attachments violate the valid usage " "conditions."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); std::vector<VkAttachmentDescription> attachments = { // input attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, // color attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, // depth attachment {0, VK_FORMAT_D24_UNORM_S8_UINT, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}, // resolve attachment {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, // preserve attachments {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; std::vector<VkAttachmentReference> input = { {0, VK_IMAGE_LAYOUT_GENERAL}, }; std::vector<VkAttachmentReference> color = { {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {2, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference depth = {3, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; std::vector<VkAttachmentReference> resolve = { {4, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; std::vector<uint32_t> preserve = {5}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, (uint32_t)input.size(), input.data(), (uint32_t)color.size(), color.data(), resolve.data(), &depth, (uint32_t)preserve.size(), preserve.data()}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, (uint32_t)attachments.size(), attachments.data(), 1, &subpass, 0, nullptr}; // Test too many color attachments { std::vector<VkAttachmentReference> too_many_colors(m_device->props.limits.maxColorAttachments + 1, color[0]); subpass.colorAttachmentCount = (uint32_t)too_many_colors.size(); subpass.pColorAttachments = too_many_colors.data(); subpass.pResolveAttachments = NULL; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-colorAttachmentCount-00845", "VUID-VkSubpassDescription2KHR-colorAttachmentCount-03063"); subpass.colorAttachmentCount = (uint32_t)color.size(); subpass.pColorAttachments = color.data(); subpass.pResolveAttachments = resolve.data(); } // Test sample count mismatch between color buffers attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_8_BIT; depth.attachment = VK_ATTACHMENT_UNUSED; // Avoids triggering 01418 TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pColorAttachments-01417", "VUID-VkSubpassDescription2KHR-pColorAttachments-03069"); depth.attachment = 3; attachments[subpass.pColorAttachments[1].attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples; // Test sample count mismatch between color buffers and depth buffer attachments[subpass.pDepthStencilAttachment->attachment].samples = VK_SAMPLE_COUNT_8_BIT; subpass.colorAttachmentCount = 1; TestRenderPassCreate(m_errorMonitor,
m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418", "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071"); attachments[subpass.pDepthStencilAttachment->attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples; subpass.colorAttachmentCount = (uint32_t)color.size(); // Test resolve attachment with UNUSED color attachment color[0].attachment = VK_ATTACHMENT_UNUSED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pResolveAttachments-00847", "VUID-VkSubpassDescription2KHR-pResolveAttachments-03065"); color[0].attachment = 1; // Test resolve from a single-sampled color attachment attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT; subpass.colorAttachmentCount = 1; // avoid mismatch (00337), and avoid double report subpass.pDepthStencilAttachment = nullptr; // avoid mismatch (01418) TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pResolveAttachments-00848", "VUID-VkSubpassDescription2KHR-pResolveAttachments-03066"); attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT; subpass.colorAttachmentCount = (uint32_t)color.size(); subpass.pDepthStencilAttachment = &depth; // Test resolve to a multi-sampled resolve attachment attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pResolveAttachments-00849", "VUID-VkSubpassDescription2KHR-pResolveAttachments-03067"); attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT; // Test with color/resolve format mismatch attachments[subpass.pColorAttachments[0].attachment].format = VK_FORMAT_R8G8B8A8_SRGB; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pResolveAttachments-00850", "VUID-VkSubpassDescription2KHR-pResolveAttachments-03068"); attachments[subpass.pColorAttachments[0].attachment].format = attachments[subpass.pResolveAttachments[0].attachment].format; // Test for UNUSED preserve attachments preserve[0] = VK_ATTACHMENT_UNUSED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-attachment-00853", "VUID-VkSubpassDescription2KHR-attachment-03073"); preserve[0] = 5; // Test for preserve attachments used elsewhere in the subpass color[0].attachment = preserve[0]; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pPreserveAttachments-00854", "VUID-VkSubpassDescription2KHR-pPreserveAttachments-03074"); color[0].attachment = 1; input[0].attachment = 0; input[0].layout = VK_IMAGE_LAYOUT_GENERAL; // Test for attachment used first as input with loadOp=CLEAR { std::vector<VkSubpassDescription> subpasses = {subpass, subpass, subpass}; subpasses[0].inputAttachmentCount = 0; subpasses[1].inputAttachmentCount = 0; attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; VkRenderPassCreateInfo rpci_multipass = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, (uint32_t)attachments.size(), attachments.data(), (uint32_t)subpasses.size(), subpasses.data(), 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci_multipass, rp2Supported, "VUID-VkSubpassDescription-loadOp-00846", "VUID-VkSubpassDescription2KHR-loadOp-03064"); attachments[input[0].attachment].loadOp =
VK_ATTACHMENT_LOAD_OP_DONT_CARE; } } TEST_F(VkLayerTest, RenderPassCreateAttachmentReferenceInvalidLayout) { TEST_DESCRIPTION("Attachment reference uses PREINITIALIZED or UNDEFINED layouts"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_UNDEFINED}, }; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, refs, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr}; // Use UNDEFINED layout TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentReference-layout-00857", "VUID-VkAttachmentReference2KHR-layout-03077"); // Use PREINITIALIZED layout refs[0].layout = VK_IMAGE_LAYOUT_PREINITIALIZED; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkAttachmentReference-layout-00857", "VUID-VkAttachmentReference2KHR-layout-03077"); } TEST_F(VkLayerTest, RenderPassCreateOverlappingCorrelationMasks) { TEST_DESCRIPTION("Create a subpass with overlapping correlation masks"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (!rp2Supported) { if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MULTIVIEW_EXTENSION_NAME); return; } } ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}; uint32_t viewMasks[] = {0x3u}; uint32_t correlationMasks[] = {0x1u, 0x3u}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 1, viewMasks, 0, nullptr, 2, correlationMasks}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpmvci, 0, 0, nullptr, 1, &subpass, 0, nullptr}; // Correlation masks must not overlap TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841", "VUID-VkRenderPassCreateInfo2KHR-pCorrelatedViewMasks-03056"); // Check for more specific "don't set any correlation masks when multiview is not enabled" if (rp2Supported) { PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR"); viewMasks[0] = 0; correlationMasks[0] = 0; 
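// With every subpass view mask zeroed the render pass is no longer a multiview render pass, so the
// non-zero correlatedViewMaskCount still carried by rpmvci (two correlation masks) is expected to
// trigger VUID-VkRenderPassCreateInfo2KHR-viewMask-03057 on the vkCreateRenderPass2KHR call below.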
correlationMasks[1] = 0; safe_VkRenderPassCreateInfo2KHR safe_rpci2; ConvertVkRenderPassCreateInfoToV2KHR(&rpci, &safe_rpci2); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03057"); VkRenderPass rp; VkResult err = vkCreateRenderPass2KHR(m_device->device(), safe_rpci2.ptr(), nullptr, &rp); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, RenderPassCreateInvalidViewMasks) { TEST_DESCRIPTION("Create a subpass with the wrong number of view masks, or inconsistent setting of view masks"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (!rp2Supported) { if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MULTIVIEW_EXTENSION_NAME); return; } } ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; uint32_t viewMasks[] = {0x3u, 0u}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 1, viewMasks, 0, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpmvci, 0, 0, nullptr, 2, subpasses, 0, nullptr}; // Not enough view masks TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pNext-01928", "VUID-VkRenderPassCreateInfo2KHR-viewMask-03058"); } TEST_F(VkLayerTest, RenderPassCreateInvalidInputAttachmentReferences) { TEST_DESCRIPTION("Create a subpass with the meta data aspect mask set for an input attachment"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkAttachmentDescription attach = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr}; VkInputAttachmentAspectReference iaar = {0, 0, VK_IMAGE_ASPECT_METADATA_BIT}; VkRenderPassInputAttachmentAspectCreateInfo rpiaaci = {VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, nullptr, 1, &iaar}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpiaaci, 0, 1, &attach, 1, &subpass, 0, nullptr}; // Invalid meta data aspect m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-VkRenderPassCreateInfo-pNext-01963"); // Cannot/should not avoid getting this one too TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkInputAttachmentAspectReference-aspectMask-01964", nullptr); // Aspect not present iaar.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01963", nullptr); // Invalid subpass index iaar.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; iaar.subpass = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01926", nullptr); iaar.subpass = 0; // Invalid input attachment index iaar.inputAttachmentIndex = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01927", nullptr); } TEST_F(VkLayerTest, RenderPassCreateSubpassNonGraphicsPipeline) { TEST_DESCRIPTION("Create a subpass with the compute pipeline bind point"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_COMPUTE, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pipelineBindPoint-00844", "VUID-VkSubpassDescription2KHR-pipelineBindPoint-03062"); } TEST_F(VkLayerTest, RenderPassCreateSubpassMissingAttributesBitMultiviewNVX) { TEST_DESCRIPTION("Create a subpass with the VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX flag missing"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME); return; } bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); VkSubpassDescription subpasses[] = { {VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-flags-00856", "VUID-VkSubpassDescription2KHR-flags-03076"); } 
TEST_F(VkLayerTest, RenderPassCreate2SubpassInvalidInputAttachmentParameters) { TEST_DESCRIPTION("Create a subpass with parameters in the input attachment ref which are invalid"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); if (!rp2Supported) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = rp2Supported ? (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR") : nullptr; VkResult err; VkAttachmentReference2KHR reference = {VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR, nullptr, VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_UNDEFINED, 0}; VkSubpassDescription2KHR subpass = {VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR, nullptr, 0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, 1, &reference, 0, nullptr, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo2KHR rpci2 = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR, nullptr, 0, 0, nullptr, 1, &subpass, 0, nullptr, 0, nullptr}; VkRenderPass rp; // Test for aspect mask of 0 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription2KHR-aspectMask-03176"); err = vkCreateRenderPass2KHR(m_device->device(), &rpci2, nullptr, &rp); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); // Test for invalid aspect mask bits reference.aspectMask |= VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription2KHR-aspectMask-03175"); err = vkCreateRenderPass2KHR(m_device->device(), &rpci2, nullptr, &rp); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, RenderPassCreateInvalidSubpassDependencies) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); bool multiviewSupported = rp2Supported; if (!rp2Supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME); multiviewSupported = true; } // Add a device features struct enabling NO features VkPhysicalDeviceFeatures features = {0}; ASSERT_NO_FATAL_FAILURE(InitState(&features)); if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { multiviewSupported = true; } // Create two dummy subpasses VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dependency; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, 
nullptr, 0, 0, nullptr, 2, subpasses, 1, &dependency}; // dependency = { 0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0 }; // Source subpass is not EXTERNAL, so source stage mask must not include HOST dependency = {0, 1, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcSubpass-00858", "VUID-VkSubpassDependency2KHR-srcSubpass-03078"); // Destination subpass is not EXTERNAL, so destination stage mask must not include HOST dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-dstSubpass-00859", "VUID-VkSubpassDependency2KHR-dstSubpass-03079"); // Geometry shaders not enabled source dependency = {0, 1, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcStageMask-00860", "VUID-VkSubpassDependency2KHR-srcStageMask-03080"); // Geometry shaders not enabled destination dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-dstStageMask-00861", "VUID-VkSubpassDependency2KHR-dstStageMask-03081"); // Tessellation not enabled source dependency = {0, 1, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency2KHR-srcStageMask-03082"); // Tessellation not enabled destination dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency2KHR-dstStageMask-03083"); // Potential cyclical dependency dependency = {1, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcSubpass-00864", "VUID-VkSubpassDependency2KHR-srcSubpass-03084"); // EXTERNAL to EXTERNAL dependency dependency = { VK_SUBPASS_EXTERNAL, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcSubpass-00865", "VUID-VkSubpassDependency2KHR-srcSubpass-03085"); // Source compute stage not part of subpass 0's GRAPHICS pipeline dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pDependencies-00837", "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03054"); // Destination compute stage not part of subpass 0's GRAPHICS pipeline dependency = {VK_SUBPASS_EXTERNAL, 0, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkRenderPassCreateInfo-pDependencies-00838", 
"VUID-VkRenderPassCreateInfo2KHR-pDependencies-03055"); // Non graphics stage in self dependency dependency = {0, 0, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcSubpass-01989", "VUID-VkSubpassDependency2KHR-srcSubpass-02244"); // Logically later source stages in self dependency dependency = {0, 0, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcSubpass-00867", "VUID-VkSubpassDependency2KHR-srcSubpass-03087"); // Source access mask mismatch with source stage mask dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_ACCESS_UNIFORM_READ_BIT, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcAccessMask-00868", "VUID-VkSubpassDependency2KHR-srcAccessMask-03088"); // Destination access mask mismatch with destination stage mask dependency = { 0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-dstAccessMask-00869", "VUID-VkSubpassDependency2KHR-dstAccessMask-03089"); if (multiviewSupported) { // VIEW_LOCAL_BIT but multiview is not enabled dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, nullptr, "VUID-VkRenderPassCreateInfo2KHR-viewMask-03059"); // Enable multiview uint32_t pViewMasks[2] = {0x3u, 0x3u}; int32_t pViewOffsets[2] = {0, 0}; VkRenderPassMultiviewCreateInfo rpmvci = { VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, nullptr, 2, pViewMasks, 0, nullptr, 0, nullptr}; rpci.pNext = &rpmvci; // Excessive view offsets dependency = {0, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; rpmvci.pViewOffsets = pViewOffsets; rpmvci.dependencyCount = 2; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01929", nullptr); rpmvci.dependencyCount = 0; // View offset with subpass self dependency dependency = {0, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; rpmvci.pViewOffsets = pViewOffsets; pViewOffsets[0] = 1; rpmvci.dependencyCount = 1; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkRenderPassCreateInfo-pNext-01930", nullptr); rpmvci.dependencyCount = 0; // View offset with no view local bit if (rp2Supported) { dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; rpmvci.pViewOffsets = pViewOffsets; pViewOffsets[0] = 1; rpmvci.dependencyCount = 1; safe_VkRenderPassCreateInfo2KHR safe_rpci2; ConvertVkRenderPassCreateInfoToV2KHR(&rpci, &safe_rpci2); TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, nullptr, "VUID-VkSubpassDependency2KHR-dependencyFlags-03092"); rpmvci.dependencyCount = 0; } // EXTERNAL subpass with VIEW_LOCAL_BIT - source subpass dependency = {VK_SUBPASS_EXTERNAL, 1, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 
VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-dependencyFlags-02520", "VUID-VkSubpassDependency2KHR-dependencyFlags-03090"); // EXTERNAL subpass with VIEW_LOCAL_BIT - destination subpass dependency = {0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, VK_DEPENDENCY_VIEW_LOCAL_BIT}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-dependencyFlags-02521", "VUID-VkSubpassDependency2KHR-dependencyFlags-03091"); // Multiple views but no view local bit in self-dependency dependency = {0, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, 0}; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDependency-srcSubpass-00872", "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03060"); } } TEST_F(VkLayerTest, RenderPassCreateInvalidMixedAttachmentSamplesAMD) { TEST_DESCRIPTION("Verify error messages for supported and unsupported sample counts in render pass attachments."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); std::vector<VkAttachmentDescription> attachments; { VkAttachmentDescription att = {}; att.format = VK_FORMAT_R8G8B8A8_UNORM; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments.push_back(att); att.format = VK_FORMAT_D16_UNORM; att.samples = VK_SAMPLE_COUNT_4_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments.push_back(att); } VkAttachmentReference color_ref = {}; color_ref.attachment = 0; color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference depth_ref = {}; depth_ref.attachment = 1; depth_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_ref; subpass.pDepthStencilAttachment = &depth_ref; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = attachments.size(); rpci.pAttachments = attachments.data(); rpci.subpassCount = 1; rpci.pSubpasses = &subpass; m_errorMonitor->ExpectSuccess(); VkRenderPass rp; VkResult err; err = vkCreateRenderPass(device(), &rpci, NULL,
&rp); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr); // Expect an error message for invalid sample counts attachments[0].samples = VK_SAMPLE_COUNT_4_BIT; attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, rp2Supported, "VUID-VkSubpassDescription-pColorAttachments-01506", "VUID-VkSubpassDescription2KHR-pColorAttachments-03070"); } static void TestRenderPassBegin(ErrorMonitor *error_monitor, const VkDevice device, const VkCommandBuffer command_buffer, const VkRenderPassBeginInfo *begin_info, bool rp2Supported, const char *rp1_vuid, const char *rp2_vuid) { VkCommandBufferBeginInfo cmd_begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr}; if (rp1_vuid) { vkBeginCommandBuffer(command_buffer, &cmd_begin_info); error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp1_vuid); vkCmdBeginRenderPass(command_buffer, begin_info, VK_SUBPASS_CONTENTS_INLINE); error_monitor->VerifyFound(); vkResetCommandBuffer(command_buffer, 0); } if (rp2Supported && rp2_vuid) { PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)vkGetDeviceProcAddr(device, "vkCmdBeginRenderPass2KHR"); VkSubpassBeginInfoKHR subpass_begin_info = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; vkBeginCommandBuffer(command_buffer, &cmd_begin_info); error_monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, rp2_vuid); vkCmdBeginRenderPass2KHR(command_buffer, begin_info, &subpass_begin_info); error_monitor->VerifyFound(); vkResetCommandBuffer(command_buffer, 0); } } TEST_F(VkLayerTest, RenderPassBeginInvalidRenderArea) { TEST_DESCRIPTION("Generate INVALID_RENDER_AREA error by beginning renderpass with extent outside of framebuffer"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Framebuffer for render target is 256x256, exceed that for INVALID_RENDER_AREA m_renderPassBeginInfo.renderArea.extent.width = 257; m_renderPassBeginInfo.renderArea.extent.height = 257; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &m_renderPassBeginInfo, rp2Supported, "Cannot execute a render pass with renderArea not within the bound of the framebuffer.", "Cannot execute a render pass with renderArea not within the bound of the framebuffer."); } TEST_F(VkLayerTest, RenderPassBeginWithinRenderPass) { // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = nullptr; bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCmdBeginRenderPass2KHR = 
(PFN_vkCmdBeginRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdBeginRenderPass2KHR"); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Bind a BeginRenderPass within an active RenderPass m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Just use a dummy Renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginRenderPass-renderpass"); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassBeginInfoKHR subpassBeginInfo = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginRenderPass2KHR-renderpass"); vkCmdBeginRenderPass2KHR(m_commandBuffer->handle(), &m_renderPassBeginInfo, &subpassBeginInfo); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, RenderPassBeginIncompatibleFramebufferRenderPass) { TEST_DESCRIPTION("Test that renderpass begin is compatible with the framebuffer renderpass "); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); // Create a depth stencil image view VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.initialized()); VkImageView dsv; VkImageViewCreateInfo dsvci = {}; dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; dsvci.pNext = nullptr; dsvci.image = image.handle(); dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D; dsvci.format = VK_FORMAT_D16_UNORM; dsvci.subresourceRange.layerCount = 1; dsvci.subresourceRange.baseMipLevel = 0; dsvci.subresourceRange.levelCount = 1; dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv); // Create a renderPass with a single attachment that uses loadOp CLEAR VkAttachmentDescription description = {0, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0, nullptr}; VkRenderPass rp1, rp2; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp1); subpass.pDepthStencilAttachment = nullptr; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp2); // Create a framebuffer VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp1, 1, &dsv, 128, 128, 1}; VkFramebuffer fb; vkCreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp2, fb, {{0, 0}, {128, 128}}, 0, nullptr}; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, false, "VUID-VkRenderPassBeginInfo-renderPass-00904", nullptr); vkDestroyRenderPass(m_device->device(), rp1, nullptr); vkDestroyRenderPass(m_device->device(), rp2, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), dsv, nullptr); } TEST_F(VkLayerTest, 
RenderPassBeginLayoutsFramebufferImageUsageMismatches) { TEST_DESCRIPTION( "Test that renderpass initial/final layouts match up with the usage bits set for each attachment of the framebuffer"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); bool maintenance2Supported = rp2Supported; // Check for VK_KHR_maintenance2 if (!rp2Supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); maintenance2Supported = true; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { maintenance2Supported = true; } // Create an input attachment view VkImageObj iai(m_device); iai.InitNoLayout(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(iai.initialized()); VkImageView iav; VkImageViewCreateInfo iavci = {}; iavci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; iavci.pNext = nullptr; iavci.image = iai.handle(); iavci.viewType = VK_IMAGE_VIEW_TYPE_2D; iavci.format = VK_FORMAT_R8G8B8A8_UNORM; iavci.subresourceRange.layerCount = 1; iavci.subresourceRange.baseMipLevel = 0; iavci.subresourceRange.levelCount = 1; iavci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &iavci, NULL, &iav); // Create a color attachment view VkImageObj cai(m_device); cai.InitNoLayout(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(cai.initialized()); VkImageView cav; VkImageViewCreateInfo cavci = {}; cavci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; cavci.pNext = nullptr; cavci.image = cai.handle(); cavci.viewType = VK_IMAGE_VIEW_TYPE_2D; cavci.format = VK_FORMAT_R8G8B8A8_UNORM; cavci.subresourceRange.layerCount = 1; cavci.subresourceRange.baseMipLevel = 0; cavci.subresourceRange.levelCount = 1; cavci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &cavci, NULL, &cav); // Create a renderPass with those attachments VkAttachmentDescription descriptions[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, {1, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}}; VkAttachmentReference input_ref = {0, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference color_ref = {1, VK_IMAGE_LAYOUT_GENERAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input_ref, 1, &color_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descriptions, 1, &subpass, 0, nullptr}; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); // Create a framebuffer VkImageView views[] = {iav, cav}; VkFramebufferCreateInfo fbci = 
{VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, views, 128, 128, 1}; VkFramebuffer fb; vkCreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {128, 128}}, 0, nullptr}; VkRenderPass rp_invalid; // Initial layout is VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but attachment doesn't support IMAGE_USAGE_COLOR_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00895", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03094"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT // / VK_IMAGE_USAGE_SAMPLED_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_GENERAL; descriptions[1].initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00897", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03097"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); descriptions[1].initialLayout = VK_IMAGE_LAYOUT_GENERAL; // Initial layout is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_TRANSFER_SRC_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00898", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03098"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL but attachment doesn't support VK_IMAGE_USAGE_TRANSFER_DST_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-00899", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03099"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; const char *initial_layout_vuid_rp1 = maintenance2Supported ? 
"VUID-vkCmdBeginRenderPass-initialLayout-01758" : "VUID-vkCmdBeginRenderPass-initialLayout-00896"; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, initial_layout_vuid_rp1, "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, initial_layout_vuid_rp1, "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); if (maintenance2Supported || rp2Supported) { // Initial layout is VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-01758", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); // Initial layout is VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL but attachment doesn't support // VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT descriptions[0].initialLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_invalid); rp_begin.renderPass = rp_invalid; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-vkCmdBeginRenderPass-initialLayout-01758", "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096"); vkDestroyRenderPass(m_device->handle(), rp_invalid, nullptr); } vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), iav, nullptr); vkDestroyImageView(m_device->device(), cav, nullptr); } TEST_F(VkLayerTest, RenderPassBeginClearOpMismatch) { TEST_DESCRIPTION( "Begin a renderPass where clearValueCount is less than the number of renderPass attachments that use " "loadOp VK_ATTACHMENT_LOAD_OP_CLEAR."); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create a renderPass with a single attachment that uses loadOp CLEAR VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; 
VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; // Set loadOp to CLEAR attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); VkRenderPassBeginInfo rp_begin = {}; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = renderPass(); rp_begin.framebuffer = framebuffer(); rp_begin.clearValueCount = 0; // Should be 1 TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, rp2Supported, "VUID-VkRenderPassBeginInfo-clearValueCount-00902", "VUID-VkRenderPassBeginInfo-clearValueCount-00902"); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, RenderPassBeginSampleLocationsInvalidIndicesEXT) { TEST_DESCRIPTION("Test that attachment indices and subpass indices specified by sample locations structures are valid"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); // Create a depth stencil image view VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.initialized()); VkImageView dsv; VkImageViewCreateInfo dsvci = {}; dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; dsvci.pNext = nullptr; dsvci.image = image.handle(); dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D; dsvci.format = VK_FORMAT_D16_UNORM; dsvci.subresourceRange.layerCount = 1; dsvci.subresourceRange.baseMipLevel = 0; dsvci.subresourceRange.levelCount = 1; dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv); // Create a renderPass with a single attachment that uses loadOp CLEAR VkAttachmentDescription description = {0, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}; VkAttachmentReference depth_stencil_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &depth_stencil_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &description, 1, &subpass, 0, nullptr}; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); // Create a framebuffer VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &dsv, 128, 128, 1}; VkFramebuffer fb; vkCreateFramebuffer(m_device->handle(), &fbci, nullptr, &fb); VkSampleLocationEXT sample_location = {0.5, 0.5};
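// Chain sample locations into the render pass begin info, then point them at an out-of-range attachment index and subpass index to provoke the two VUIDs checked below.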
VkSampleLocationsInfoEXT sample_locations_info = { VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT, nullptr, VK_SAMPLE_COUNT_1_BIT, {1, 1}, 1, &sample_location}; VkAttachmentSampleLocationsEXT attachment_sample_locations = {0, sample_locations_info}; VkSubpassSampleLocationsEXT subpass_sample_locations = {0, sample_locations_info}; VkRenderPassSampleLocationsBeginInfoEXT rp_sl_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT, nullptr, 1, &attachment_sample_locations, 1, &subpass_sample_locations}; VkRenderPassBeginInfo rp_begin = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, &rp_sl_begin, rp, fb, {{0, 0}, {128, 128}}, 0, nullptr}; attachment_sample_locations.attachmentIndex = 1; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, false, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531", nullptr); attachment_sample_locations.attachmentIndex = 0; subpass_sample_locations.subpassIndex = 1; TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &rp_begin, false, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532", nullptr); subpass_sample_locations.subpassIndex = 0; vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), dsv, nullptr); } TEST_F(VkLayerTest, RenderPassNextSubpassExcessive) { TEST_DESCRIPTION("Test that an error is produced when CmdNextSubpass is called too many times in a renderpass instance"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR = nullptr; bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState()); if (rp2Supported) { vkCmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdNextSubpass2KHR"); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdNextSubpass-None-00909"); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassBeginInfoKHR subpassBeginInfo = {VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, nullptr, VK_SUBPASS_CONTENTS_INLINE}; VkSubpassEndInfoKHR subpassEndInfo = {VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, nullptr}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdNextSubpass2KHR-None-03102"); vkCmdNextSubpass2KHR(m_commandBuffer->handle(), &subpassBeginInfo, &subpassEndInfo); m_errorMonitor->VerifyFound(); } m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, RenderPassEndBeforeFinalSubpass) { TEST_DESCRIPTION("Test that an error is produced when CmdEndRenderPass is called before the final subpass has been reached"); // Check for VK_KHR_get_physical_device_properties2 if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR = nullptr; bool 
rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); if (rp2Supported) { vkCmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdEndRenderPass2KHR"); } VkSubpassDescription sd[2] = {{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 2, sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 16, 16, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {16, 16}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdEndRenderPass-None-00910"); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); if (rp2Supported) { VkSubpassEndInfoKHR subpassEndInfo = {VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, nullptr}; m_commandBuffer->reset(); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdEndRenderPass2KHR-None-03103"); vkCmdEndRenderPass2KHR(m_commandBuffer->handle(), &subpassEndInfo); m_errorMonitor->VerifyFound(); } // Clean up.
vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, RenderPassDestroyWhileInUse) { TEST_DESCRIPTION("Delete in-use renderPass."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create simple renderpass VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {}; rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rpbi.framebuffer = m_framebuffer; rpbi.renderPass = rp; m_commandBuffer->BeginRenderPass(rpbi); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-00873"); vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyFound(); // Wait for queue to complete so we can safely destroy rp vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If renderPass is not VK_NULL_HANDLE, renderPass must be a valid VkRenderPass handle"); m_errorMonitor->SetUnexpectedError("Was it created? Has it already been destroyed?"); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassCreateAttachmentUsedTwiceOK) { TEST_DESCRIPTION("Attachment is used simultaneously as color and input, with the same layout. This is OK."); ASSERT_NO_FATAL_FAILURE(Init()); VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_GENERAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr}; VkRenderPass rp; m_errorMonitor->ExpectSuccess(); vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassCreateInitialLayoutUndefined) { TEST_DESCRIPTION( "Ensure that CmdBeginRenderPass with an attachment's initialLayout of VK_IMAGE_LAYOUT_UNDEFINED works when the command " "buffer has prior knowledge of that attachment's layout."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. 
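// initialLayout is UNDEFINED, so no prior layout is required on first use; the second BeginRenderPass below exercises the case where the command buffer already tracks a layout for the attachment.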
VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a single command buffer which uses this renderpass twice. The // bug is triggered at the beginning of the second renderpass, when the // command buffer already has a layout recorded for the attachment. VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyNotFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassCreateAttachmentLayoutWithLoadOpThenReadOnly) { TEST_DESCRIPTION( "Positive test where we create a renderpass with an attachment that uses LOAD_OP_CLEAR, the first subpass has a valid " "layout, and a second subpass then uses a valid *READ_ONLY* layout."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkAttachmentReference attach[2] = {}; attach[0].attachment = 0; attach[0].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attach[1].attachment = 0; attach[1].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; VkSubpassDescription subpasses[2] = {}; // First subpass clears DS attach on load subpasses[0].pDepthStencilAttachment = &attach[0]; // 2nd subpass reads in DS as input attachment subpasses[1].inputAttachmentCount = 1; subpasses[1].pInputAttachments = &attach[1]; VkAttachmentDescription attach_desc = {}; attach_desc.format = depth_format; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; rpci.subpassCount = 2; rpci.pSubpasses = subpasses; // Now create RenderPass and verify no errors VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkPositiveLayerTest, RenderPassBeginSubpassZeroTransitionsApplied) { TEST_DESCRIPTION("Ensure that CmdBeginRenderPass applies the layout transitions for the first subpass"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep}; VkResult err; VkRenderPass rp; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a single command buffer which issues a pipeline barrier w/ // image memory barrier for the attachment. This detects the previously // missing tracking of the subpass layout by throwing a validation error // if it doesn't occur. 
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkImageMemoryBarrier imb = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, nullptr, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, image.handle(), {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}}; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &imb); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassBeginTransitionsAttachmentUnused) { TEST_DESCRIPTION( "Ensure that layout transitions work correctly without errors, when an attachment reference is VK_ATTACHMENT_UNUSED"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with no attachments VkAttachmentReference att_ref = {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a command buffer which just begins and ends the renderpass. The // bug manifests in BeginRenderPass. VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassBeginStencilLoadOp) { TEST_DESCRIPTION("Create a stencil-only attachment with a LOAD_OP set to CLEAR. stencil[Load|Store]Op used to be ignored."); VkResult result = VK_SUCCESS; ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkImageFormatProperties formatProps; vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &formatProps); if (formatProps.maxExtent.width < 100 || formatProps.maxExtent.height < 100) { printf("%s Image format max extent is too small.\n", kSkipPrefix); return; } VkFormat depth_stencil_fmt = depth_format; m_depthStencil->Init(m_device, 100, 100, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT); VkAttachmentDescription att = {}; VkAttachmentReference ref = {}; att.format = depth_stencil_fmt; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkClearValue clear; clear.depthStencil.depth = 1.0; clear.depthStencil.stencil = 0; ref.attachment = 0; ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = 0; subpass.pColorAttachments = NULL; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = &ref; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPass rp; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.attachmentCount = 1; rp_info.pAttachments = &att; rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; result = vkCreateRenderPass(device(), &rp_info, NULL, &rp); ASSERT_VK_SUCCESS(result); VkImageView *depthView = m_depthStencil->BindInfo(); VkFramebufferCreateInfo fb_info = {}; fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fb_info.pNext = NULL; fb_info.renderPass = rp; fb_info.attachmentCount = 1; fb_info.pAttachments = depthView; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; VkFramebuffer fb; result = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); ASSERT_VK_SUCCESS(result); VkRenderPassBeginInfo rpbinfo = {}; rpbinfo.clearValueCount = 1; rpbinfo.pClearValues = &clear; rpbinfo.pNext = NULL; rpbinfo.renderPass = rp; rpbinfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rpbinfo.renderArea.extent.width = 100; rpbinfo.renderArea.extent.height = 100; rpbinfo.renderArea.offset.x = 0; rpbinfo.renderArea.offset.y = 0; rpbinfo.framebuffer = fb; VkFenceObj fence; fence.init(*m_device, VkFenceObj::create_info()); ASSERT_TRUE(fence.initialized()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(rpbinfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(fence); VkImageObj destImage(m_device); destImage.Init(100, 100, 1, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageMemoryBarrier barrier = {}; VkImageSubresourceRange range; barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; 
barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; barrier.image = m_depthStencil->handle(); range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; range.baseMipLevel = 0; range.levelCount = 1; range.baseArrayLayer = 0; range.layerCount = 1; barrier.subresourceRange = range; fence.wait(VK_TRUE, UINT64_MAX); VkCommandBufferObj cmdbuf(m_device, m_commandPool); cmdbuf.begin(); cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); barrier.srcAccessMask = 0; barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; barrier.image = destImage.handle(); barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); VkImageCopy cregion; cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; cregion.srcSubresource.mipLevel = 0; cregion.srcSubresource.baseArrayLayer = 0; cregion.srcSubresource.layerCount = 1; cregion.srcOffset.x = 0; cregion.srcOffset.y = 0; cregion.srcOffset.z = 0; cregion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; cregion.dstSubresource.mipLevel = 0; cregion.dstSubresource.baseArrayLayer = 0; cregion.dstSubresource.layerCount = 1; cregion.dstOffset.x = 0; cregion.dstOffset.y = 0; cregion.dstOffset.z = 0; cregion.extent.width = 100; cregion.extent.height = 100; cregion.extent.depth = 1; cmdbuf.CopyImage(m_depthStencil->handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, destImage.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion); cmdbuf.end(); VkSubmitInfo submit_info; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.pNext = NULL; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmdbuf.handle(); submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; m_errorMonitor->ExpectSuccess(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassBeginInlineAndSecondaryCommandBuffers) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyNotFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, RenderPassBeginDepthStencilLayoutTransitionFromUndefined) { TEST_DESCRIPTION( "Create a render pass with depth-stencil attachment where layout transition from UNDEFINED TO DS_READ_ONLY_OPTIMAL is set " "by render pass and verify that transition has correctly occurred at queue submit time with no validation errors."); 
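// Note: no explicit image barrier is recorded in this test; the render pass itself is expected to perform the layout transition away from UNDEFINED.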
ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkImageFormatProperties format_props; vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, 0, &format_props); if (format_props.maxExtent.width < 32 || format_props.maxExtent.height < 32) { printf("%s Depth extent too small, RenderPassDepthStencilLayoutTransition skipped.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // A renderpass with one depth/stencil attachment. VkAttachmentDescription attachment = {0, depth_format, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible ds image. VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, depth_format, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_DEPTH_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); // Cleanup vkDestroyImageView(m_device->device(), view, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); vkDestroyFramebuffer(m_device->device(), fb, NULL); } TEST_F(VkLayerTest, DisabledIndependentBlend) { TEST_DESCRIPTION( "Generate INDEPENDENT_BLEND by disabling independent blend and then specifying different blend states for two " "attachments"); VkPhysicalDeviceFeatures features = {}; features.independentBlend = VK_FALSE; ASSERT_NO_FATAL_FAILURE(Init(&features)); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of pAttachments must be identical"); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkPipelineObj pipeline(m_device); // Create a renderPass with two 
color attachments VkAttachmentReference attachments[2] = {}; attachments[0].layout = VK_IMAGE_LAYOUT_GENERAL; attachments[1].attachment = 1; attachments[1].layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = attachments; subpass.colorAttachmentCount = 2; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 2; VkAttachmentDescription attach_desc[2] = {}; attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass renderpass; vkCreateRenderPass(m_device->device(), &rpci, NULL, &renderpass); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); pipeline.AddShader(&vs); VkPipelineColorBlendAttachmentState att_state1 = {}, att_state2 = {}; att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state1.blendEnable = VK_TRUE; att_state2.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state2.blendEnable = VK_FALSE; pipeline.AddColorAttachment(0, att_state1); pipeline.AddColorAttachment(1, att_state2); pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), renderpass, NULL); } // Is the Pipeline compatible with the expectations of the Renderpass/subpasses? TEST_F(VkLayerTest, PipelineRenderpassCompatibility) { TEST_DESCRIPTION( "Create a graphics pipeline that is incompatible with the requirements of its contained Renderpass/subpasses."); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetObj ds_obj(m_device); ds_obj.AppendDummy(); ds_obj.CreateVKDescriptorSet(m_commandBuffer); VkShaderObj vs_obj(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineColorBlendAttachmentState att_state1 = {}; att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state1.blendEnable = VK_TRUE; VkRenderpassObj rp_obj(m_device); { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753"); VkPipelineObj pipeline(m_device); pipeline.AddShader(&vs_obj); pipeline.AddColorAttachment(0, att_state1); VkGraphicsPipelineCreateInfo info = {}; pipeline.InitGraphicsPipelineCreateInfo(&info); info.pColorBlendState = nullptr; pipeline.CreateVKPipeline(ds_obj.GetPipelineLayout(), rp_obj.handle(), &info); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, FramebufferCreateErrors) { TEST_DESCRIPTION( "Hit errors when attempting to create a framebuffer :\n" " 1. Mismatch between framebuffer & renderPass attachmentCount\n" " 2. Use a color image as depthStencil attachment\n" " 3. Mismatch framebuffer & renderPass attachment formats\n" " 4. Mismatch framebuffer & renderPass attachment #samples\n" " 5. Framebuffer attachment w/ non-1 mip-levels\n" " 6. Framebuffer attachment where dimensions don't match\n" " 7. Framebuffer attachment where dimensions don't match\n" " 8. Framebuffer attachment w/o identity swizzle\n" " 9. 
framebuffer dimensions exceed physical device limits\n"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-attachmentCount-00876"); // Create a renderPass with a single color attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); VkImageView ivs[2]; ivs[0] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM); ivs[1] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM); VkFramebufferCreateInfo fb_info = {}; fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fb_info.pNext = NULL; fb_info.renderPass = rp; // Set mis-matching attachmentCount fb_info.attachmentCount = 2; fb_info.pAttachments = ivs; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; VkFramebuffer fb; err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); // Create a renderPass with a depth-stencil attachment created with // IMAGE_USAGE_COLOR_ATTACHMENT // Add our color attachment to pDepthStencilAttachment subpass.pDepthStencilAttachment = &attach; subpass.pColorAttachments = NULL; VkRenderPass rp_ds; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_ds); ASSERT_VK_SUCCESS(err); // Set correct attachment count, but attachment has COLOR usage bit set fb_info.attachmentCount = 1; fb_info.renderPass = rp_ds; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-02633"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp_ds, NULL); // Create new renderpass with alternate attachment format from fb attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; subpass.pDepthStencilAttachment = NULL; subpass.pColorAttachments = &attach; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); // Cause error due to mis-matched formats between rp & fb // rp attachment 0 now has RGBA8 but corresponding fb attach is BGRA8 fb_info.renderPass = rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00880"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); // Create new renderpass with alternate sample count from fb attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_4_BIT; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); // Cause error due to mis-matched sample count between rp & fb 
fb_info.renderPass = rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00881"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); { // Create an image with 2 mip levels. VkImageObj image(m_device); image.Init(128, 128, 2, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); // Create a image view with two mip levels. VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_B8G8R8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; // Set level count to 2 (only 1 is allowed for FB attachment) ivci.subresourceRange.levelCount = 2; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); // Re-create renderpass to have matching sample count attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); fb_info.renderPass = rp; fb_info.pAttachments = &view; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00883"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyImageView(m_device->device(), view, NULL); } // Update view to original color buffer and grow FB dimensions too big fb_info.pAttachments = ivs; fb_info.height = 1024; fb_info.width = 1024; fb_info.layers = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } { // Create an image with one mip level. 
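// The image view created below uses a non-identity component swizzle, which is not allowed for a framebuffer attachment.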
VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); // Create view attachment with non-identity swizzle VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_B8G8R8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; ivci.components.r = VK_COMPONENT_SWIZZLE_G; ivci.components.g = VK_COMPONENT_SWIZZLE_R; ivci.components.b = VK_COMPONENT_SWIZZLE_A; ivci.components.a = VK_COMPONENT_SWIZZLE_B; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); fb_info.pAttachments = &view; fb_info.height = 100; fb_info.width = 100; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00884"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyImageView(m_device->device(), view, NULL); } // reset attachment to color attachment fb_info.pAttachments = ivs; // Request fb that exceeds max width fb_info.width = m_device->props.limits.maxFramebufferWidth + 1; fb_info.height = 100; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00886"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // and width=0 fb_info.width = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00885"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // Request fb that exceeds max height fb_info.width = 100; fb_info.height = m_device->props.limits.maxFramebufferHeight + 1; fb_info.layers = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00888"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // and height=0 fb_info.height = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00887"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // Request fb that exceeds max layers fb_info.width = 100; fb_info.height = 100; fb_info.layers = m_device->props.limits.maxFramebufferLayers + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00890"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); 
m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } // and layers=0 fb_info.layers = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00889"); err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyFramebuffer(m_device->device(), fb, NULL); } vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, PointSizeFailure) { TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST but do not set PointSize in vertex shader."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Pipeline topology is set to POINT_LIST"); ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize but not writing to it static const char NoPointSizeVertShader[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" "}\n"; VkShaderObj vs(m_device, NoPointSizeVertShader, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, PointSizeGeomShaderFailure) { TEST_DESCRIPTION( "Create a pipeline using TOPOLOGY_POINT_LIST, set PointSize vertex shader, but not in the final geometry stage."); ASSERT_NO_FATAL_FAILURE(Init()); if ((!m_device->phy().features().geometryShader) || (!m_device->phy().features().shaderTessellationAndGeometryPointSize)) { printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix); return; } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Pipeline topology is set to POINT_LIST"); ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize and writing to it static const char PointSizeVertShader[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " gl_PointSize = 5.0;\n" "}\n"; static char const *gsSource = "#version 450\n" "layout (points) in;\n" "layout (points) out;\n" "layout (max_vertices = 1) out;\n" "void main() {\n" " gl_Position = vec4(1.0, 0.5, 0.5, 0.0);\n" " EmitVertex();\n" "}\n"; 
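// The geometry shader is the final vertex-processing stage and never writes gl_PointSize, so the POINT_LIST pipeline created below should fail validation.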
VkShaderObj vs(m_device, PointSizeVertShader, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&gs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicDepthBiasNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Depth Bias dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic depth bias m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bias state not set for this command buffer"); VKTriangleTest(BsoFailDepthBias); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicLineWidthNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Line Width dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic line width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic line width state not set for this command buffer"); VKTriangleTest(BsoFailLineWidth); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicViewportNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Viewport dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic viewport state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic viewport(s) 0 are used by pipeline state object, but were not provided"); VKTriangleTest(BsoFailViewport); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicScissorNotBound) { TEST_DESCRIPTION("Run a simple draw calls to validate failure when Scissor dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic scissor state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic scissor(s) 0 are used by pipeline state object, but were not provided"); VKTriangleTest(BsoFailScissor); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicBlendConstantsNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Blend Constants dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic blend constant state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic blend constants state not set for this command buffer"); VKTriangleTest(BsoFailBlend); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicDepthBoundsNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Depth Bounds dynamic state is required but not correctly bound."); 
ASSERT_NO_FATAL_FAILURE(Init()); if (!m_device->phy().features().depthBounds) { printf("%s Device does not support depthBounds test; skipped.\n", kSkipPrefix); return; } // Dynamic depth bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bounds state not set for this command buffer"); VKTriangleTest(BsoFailDepthBounds); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicStencilReadNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Stencil Read dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic stencil read mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil read mask state not set for this command buffer"); VKTriangleTest(BsoFailStencilReadMask); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicStencilWriteNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Stencil Write dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic stencil write mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil write mask state not set for this command buffer"); VKTriangleTest(BsoFailStencilWriteMask); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DynamicStencilRefNotBound) { TEST_DESCRIPTION( "Run a simple draw calls to validate failure when Stencil Ref dynamic state is required but not correctly bound."); ASSERT_NO_FATAL_FAILURE(Init()); // Dynamic stencil reference m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic stencil reference state not set for this command buffer"); VKTriangleTest(BsoFailStencilReference); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferNotBound) { TEST_DESCRIPTION("Run an indexed draw call without an index buffer bound."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Index buffer object not bound to this command buffer when Indexed "); VKTriangleTest(BsoFailIndexBuffer); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadSize) { TEST_DESCRIPTION("Run indexed draw call with bad index buffer size."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadSize); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadOffset) { TEST_DESCRIPTION("Run indexed draw call with bad index buffer offset."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadOffset); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadBindSize) { TEST_DESCRIPTION("Run bind index buffer with a size greater than the index buffer."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadMapSize); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IndexBufferBadBindOffset) { TEST_DESCRIPTION("Run bind index buffer with an offset greater than the size of the index buffer."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, 
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size "); VKTriangleTest(BsoFailIndexBufferBadMapOffset); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CommandBufferTwoSubmits) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // We luck out b/c by default the framework creates CB w/ the // VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->end(); // Bypass framework since it does the waits automatically VkResult err = VK_SUCCESS; VkSubmitInfo submit_info; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.pNext = NULL; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); vkQueueWaitIdle(m_device->m_queue); // Cause validation error by re-submitting cmd buffer that should only be // submitted once err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AllocDescriptorFromEmptyPool) { TEST_DESCRIPTION("Attempt to allocate more sets and descriptors than descriptor pool has available."); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // This test is valid for Vulkan 1.0 only -- skip if device has an API version greater than 1.0. 
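// With VK_KHR_maintenance1 (core in 1.1), exhausting the pool may simply return VK_ERROR_OUT_OF_POOL_MEMORY instead of triggering these validation errors.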
if (m_device->props.apiVersion >= VK_API_VERSION_1_1) { printf("%s Device has apiVersion greater than 1.0 -- skipping Descriptor Set checks.\n", kSkipPrefix); return; } // Create Pool w/ 2 Sampler descriptors, but try to alloc a Uniform Buffer // descriptor from it VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 2; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding_samp = {}; dsl_binding_samp.binding = 0; dsl_binding_samp.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding_samp.descriptorCount = 1; dsl_binding_samp.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding_samp.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_samp(m_device, {dsl_binding_samp}); // Try to allocate 2 sets when pool only has 1 set VkDescriptorSet descriptor_sets[2]; VkDescriptorSetLayout set_layouts[2] = {ds_layout_samp.handle(), ds_layout_samp.handle()}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 2; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = set_layouts; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306"); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets); m_errorMonitor->VerifyFound(); alloc_info.descriptorSetCount = 1; // Create layout w/ descriptor type not available in pool VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout_ub.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307"); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, FreeDescriptorFromOneShotPool) { VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeDescriptorSets-descriptorPool-00312"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = 0; // Not specifying VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT means // app can only call vkResetDescriptorPool on this pool. ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {};
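// Single uniform-buffer binding for the one set this pool allows; freeing that set individually below is what
// triggers VUID-vkFreeDescriptorSets-descriptorPool-00312, since the pool lacks the FREE_DESCRIPTOR_SET flag.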
dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); err = vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidDescriptorPool) { // Attempt to clear Descriptor Pool with bad object. // ObjectTracker should catch this. ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-parameter"); uint64_t fake_pool_handle = 0xbaad6001; VkDescriptorPool bad_pool = reinterpret_cast<VkDescriptorPool>(fake_pool_handle); vkResetDescriptorPool(device(), bad_pool, 0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidDescriptorSet) { // Attempt to bind an invalid Descriptor Set to a valid Command Buffer // ObjectTracker should catch this. // Create a valid cmd buffer // call vkCmdBindDescriptorSets w/ false Descriptor Set uint64_t fake_set_handle = 0xbaad6001; VkDescriptorSet bad_set = reinterpret_cast<VkDescriptorSet>(fake_set_handle); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding layout_binding = {}; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj descriptor_set_layout(m_device, {layout_binding}); const VkPipelineLayoutObj pipeline_layout(DeviceObj(), {&descriptor_set_layout}); m_commandBuffer->begin(); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &bad_set, 0, NULL); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, InvalidDescriptorSetLayout) { // Attempt to create a Pipeline Layout with an invalid Descriptor Set Layout. // ObjectTracker should catch this. uint64_t fake_layout_handle = 0xbaad6001; VkDescriptorSetLayout bad_layout = reinterpret_cast<VkDescriptorSetLayout>(fake_layout_handle); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo plci = {}; plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; plci.pNext = NULL; plci.setLayoutCount = 1; plci.pSetLayouts = &bad_layout; vkCreatePipelineLayout(device(), &plci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, WriteDescriptorSetIntegrityCheck) { TEST_DESCRIPTION( "This test verifies some requirements of chapter 13.2.3 of the Vulkan Spec " "1) A uniform buffer update must have a valid buffer index. " "2) When using an array of descriptors in a single WriteDescriptor, the descriptor types and stageflags " "must all be the same.
" "3) Immutable Sampler state must match across descriptors. " "4) That sampled image descriptors have required layouts. "); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00324"); ASSERT_NO_FATAL_FAILURE(Init()); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet::Bindings bindings = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, NULL}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, NULL}, {2, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, static_cast(&sampler)}, {3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, NULL}}; OneOffDescriptorSet descriptor_set(m_device, bindings); ASSERT_TRUE(descriptor_set.Initialized()); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; // 1) The uniform buffer is intentionally invalid here vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs); VkMemoryAllocateInfo mem_alloc_info = {}; mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc_info.allocationSize = mem_reqs.size; m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buffInfo[2] = {}; buffInfo[0].buffer = dyub; buffInfo[0].offset = 0; buffInfo[0].range = 1024; buffInfo[1].buffer = dyub; buffInfo[1].offset = 0; buffInfo[1].range = 1024; descriptor_write.pBufferInfo = buffInfo; descriptor_write.descriptorCount = 2; // 2) The stateFlags don't match between the first and second descriptor m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // 3) The second descriptor has a null_ptr pImmutableSamplers and // the third descriptor contains an immutable sampler descriptor_write.dstBinding = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; // Make pImageInfo index non-null to avoid complaints of it missing VkDescriptorImageInfo imageInfo = {}; imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; descriptor_write.pImageInfo = &imageInfo; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); 
m_errorMonitor->VerifyFound(); // 4) That sampled image descriptors have required layouts // Create images to update the descriptor with VkImageObj image(m_device); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); // Attempt write with incorrect layout for sampled descriptor imageInfo.sampler = VK_NULL_HANDLE; imageInfo.imageView = image.targetView(tex_format); imageInfo.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED; descriptor_write.dstBinding = 3; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-01403"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkFreeMemory(m_device->device(), mem, NULL); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, WriteDescriptorSetConsecutiveUpdates) { TEST_DESCRIPTION( "Verifies that updates rolling over to next descriptor work correctly by destroying buffer from consecutive update known " "to be used in descriptor set and verifying that error is flagged."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); uint32_t qfi = 0; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; bci.size = 2048; bci.queueFamilyIndexCount = 1; bci.pQueueFamilyIndices = &qfi; VkBufferObj buffer0; buffer0.init(*m_device, bci); VkPipelineObj pipe(m_device); { // Scope 2nd buffer to cause early destruction VkBufferObj buffer1; bci.size = 1024; buffer1.init(*m_device, bci); VkDescriptorBufferInfo buffer_info[3] = {}; buffer_info[0].buffer = buffer0.handle(); buffer_info[0].offset = 0; buffer_info[0].range = 1024; buffer_info[1].buffer = buffer0.handle(); buffer_info[1].offset = 1024; buffer_info[1].range = 1024; buffer_info[2].buffer = buffer1.handle(); buffer_info[2].offset = 0; buffer_info[2].range = 1024; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; // descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 3; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = buffer_info; // Update descriptor vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO that uses the uniform buffers char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "layout(set=0) layout(binding=1) uniform blah { int x; } duh;\n" "void main(){\n" " x = vec4(duh.x, bar.y, bar.x, 1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkResult
err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); } // buffer1 just went out of scope and was destroyed along with its memory m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer "); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DeviceMemory "); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineLayoutExceedsSetLimit) { TEST_DESCRIPTION("Attempt to create a pipeline layout using more than the physical limit of SetLayouts."); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding layout_binding = {}; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_binding.pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &layout_binding; VkDescriptorSetLayout ds_layout = {}; VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); // Create an array of DSLs, one larger than the physical limit const auto excess_layouts = 1 + m_device->phy().properties().limits.maxBoundDescriptorSets; std::vector<VkDescriptorSetLayout> dsl_array(excess_layouts, ds_layout); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = excess_layouts; pipeline_layout_ci.pSetLayouts = dsl_array.data(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286"); VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); // Clean up vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); } TEST_F(VkLayerTest, CreatePipelineLayoutExcessPerStageDescriptors) { TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed per-stage limits"); ASSERT_NO_FATAL_FAILURE(Init()); uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers; uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers; uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages; uint32_t max_storage_images =
m_device->phy().properties().limits.maxPerStageDescriptorStorageImages; uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers; uint32_t max_combined = std::min(max_samplers, max_sampled_images); uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments; uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic; uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers; uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic; uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers; uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages; uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages; uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers; uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments; // Devices that report UINT32_MAX for any of these limits can't run this test if (UINT32_MAX == std::max({max_uniform_buffers, max_storage_buffers, max_sampled_images, max_storage_images, max_samplers})) { printf("%s Physical device limits report as 2^32-1. Skipping test.\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dslb = {}; std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {}; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // VU 0fe0023e - too many sampler type descriptors in fragment stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb.descriptorCount = max_samplers; dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = max_combined; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"); if ((max_samplers + max_combined) > sum_samplers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); // expect all-stages sum too } if (max_combined > sum_sampled_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(),
ds_layout, NULL); // VU 0fe00240 - too many uniform buffer type descriptors in vertex stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dslb.descriptorCount = max_uniform_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); if (dslb.descriptorCount > sum_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"); // expect all-stages sum too } if (dslb.descriptorCount > sum_dyn_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00242 - too many storage buffer type descriptors in compute stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb.descriptorCount = max_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_ALL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; dslb_vec.push_back(dslb); dslb.binding = 2; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); if (dslb.descriptorCount > sum_dyn_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"); // expect all-stages sum too } if (dslb_vec[0].descriptorCount + dslb_vec[2].descriptorCount > sum_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00244 - too many sampled image type descriptors in multiple stages dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; dslb.descriptorCount = max_sampled_images; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; 
dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS; dslb_vec.push_back(dslb); dslb.binding = 2; dslb.descriptorCount = max_combined; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); if (max_combined + 2 * max_sampled_images > sum_sampled_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // expect all-stages sum too } if (max_combined > sum_samplers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00246 - too many storage image type descriptors in fragment stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dslb.descriptorCount = 1 + (max_storage_images / 2); dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"); if (2 * dslb.descriptorCount > sum_storage_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"); // expect all-stages sum too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d18 - too many input attachments in fragment stage dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; dslb.descriptorCount = 1 + max_input_attachments; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"); if (dslb.descriptorCount > sum_input_attachments) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"); // expect all-stages sum too } err = 
vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); } TEST_F(VkLayerTest, CreatePipelineLayoutExcessDescriptorsOverall) { TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed limits"); ASSERT_NO_FATAL_FAILURE(Init()); uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers; uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers; uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages; uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages; uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers; uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments; uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic; uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers; uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic; uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers; uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages; uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages; uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers; uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments; // Devices that report UINT32_MAX for any of these limits can't run this test if (UINT32_MAX == std::max({sum_dyn_uniform_buffers, sum_uniform_buffers, sum_dyn_storage_buffers, sum_storage_buffers, sum_sampled_images, sum_storage_images, sum_samplers, sum_input_attachments})) { printf("%s Physical device limits report as 2^32-1. 
Skipping test.\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dslb = {}; std::vector dslb_vec = {}; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // VU 0fe00d1a - too many sampler type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb.descriptorCount = sum_samplers / 2; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = sum_samplers - dslb.descriptorCount + 1; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); if (dslb.descriptorCount > max_samplers) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"); // Expect max-per-stage samplers exceeds limits } if (dslb.descriptorCount > sum_sampled_images) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // Expect max overall sampled image count exceeds limits } if (dslb.descriptorCount > max_sampled_images) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); // Expect max per-stage sampled image count exceeds limits } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d1c - too many uniform buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dslb.descriptorCount = sum_uniform_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"); if (dslb.descriptorCount > max_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if 
test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d1e - too many dynamic uniform buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dslb.descriptorCount = sum_dyn_uniform_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"); if (dslb.descriptorCount > max_uniform_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d20 - too many storage buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb.descriptorCount = sum_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"); if (dslb.descriptorCount > max_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d22 - too many dynamic storage buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; dslb.descriptorCount = sum_dyn_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"); if (dslb.descriptorCount > max_storage_buffers) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); 
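// The remaining sub-cases repeat the same recipe: build one set layout that exceeds a single all-stages limit,
// register the matching -pSetLayouts- VUID (plus any per-stage VUIDs this device's limits also trip), attempt
// vkCreatePipelineLayout, then destroy the layout objects before the next sub-case.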
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d24 - too many sampled image type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = max_samplers; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; // revisit: not robust to odd limits. uint32_t remaining = (max_samplers > sum_sampled_images ? 0 : (sum_sampled_images - max_samplers) / 2); dslb.descriptorCount = 1 + remaining; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 2; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > max_sampled_images) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); // Expect max-per-stage sampled images to exceed limits } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d26 - too many storage image type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dslb.descriptorCount = sum_storage_images / 2; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dslb.descriptorCount = sum_storage_images - dslb.descriptorCount + 1; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"); if (dslb.descriptorCount > max_storage_images) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d28 - too many input attachment type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; dslb.descriptorCount = sum_input_attachments + 1; 
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"); if (dslb.descriptorCount > max_input_attachments) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"); // expect max-per-stage too } err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferBufferDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a buffer dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buf_info.size = 256; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; bool pass = false; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); vkCmdFillBuffer(m_commandBuffer->handle(), buffer, 0, VK_WHOLE_SIZE, 0); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer "); // Destroy buffer dependency prior to submit to cause ERROR vkDestroyBuffer(m_device->device(), buffer, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); vkFreeMemory(m_device->handle(), mem, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferBufferViewDestroyed) { TEST_DESCRIPTION("Delete bufferView bound to cmd buffer, then attempt to submit cmd buffer."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count; ds_type_count.type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = 
vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding layout_binding; layout_binding.binding = 0; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; layout_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {layout_binding}); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); VkDescriptorSet descriptor_set; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferView view; VkBufferViewCreateInfo bvci = {}; bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; bvci.buffer = buffer; bvci.format = VK_FORMAT_R32_SFLOAT; bvci.range = VK_WHOLE_SIZE; err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view); ASSERT_VK_SUCCESS(err); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = imageLoad(s, 0);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound BufferView "); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkViewport viewport = {0, 0, 16, 16, 0, 
1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // Bind pipeline to cmd buffer - This causes crash on Mali vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, nullptr); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Delete BufferView in order to invalidate cmd buffer vkDestroyBufferView(m_device->device(), view, NULL); // Now attempt submit of cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Clean-up vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferImageDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an image dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Have to bind memory to image before recording cmd in cmd buffer using it VkMemoryRequirements mem_reqs; VkDeviceMemory image_mem; bool pass; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_mem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkClearColorValue ccv; ccv.float32[0] = 1.0f; ccv.float32[1] = 1.0f; ccv.float32[2] = 1.0f; ccv.float32[3] = 1.0f; VkImageSubresourceRange isr = {}; isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; isr.baseArrayLayer = 0; isr.baseMipLevel = 0; isr.layerCount = 1; isr.levelCount = 1; vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image "); // Destroy image dependency prior to submit to cause ERROR vkDestroyImage(m_device->device(), image, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = 
VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), image_mem, nullptr); } TEST_F(VkLayerTest, InvalidCmdBufferFramebufferImageDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a framebuffer image dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; VkResult err = VK_SUCCESS; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties); if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) { printf("%s Image format doesn't support required features.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageCreateInfo image_ci = {}; image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_ci.pNext = NULL; image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.format = VK_FORMAT_B8G8R8A8_UNORM; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.extent.depth = 1; image_ci.mipLevels = 1; image_ci.arrayLayers = 1; image_ci.samples = VK_SAMPLE_COUNT_1_BIT; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_ci.flags = 0; VkImage image; ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image)); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image, VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Just use default renderpass with our framebuffer m_renderPassBeginInfo.framebuffer = fb; m_renderPassBeginInfo.renderArea.extent.width = 32; m_renderPassBeginInfo.renderArea.extent.height = 32; // Create Null cmd buffer for submit m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy image attached to framebuffer to invalidate cmd buffer vkDestroyImage(m_device->device(), image, NULL); // Now attempt to submit cmd buffer and verify error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image "); m_commandBuffer->QueueCommandBuffer(false); 
m_errorMonitor->VerifyFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); vkFreeMemory(m_device->device(), image_memory, nullptr); } TEST_F(VkLayerTest, FramebufferInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use framebuffer."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; VkResult err = VK_SUCCESS; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Just use default renderpass with our framebuffer m_renderPassBeginInfo.framebuffer = fb; // Create Null cmd buffer for submit m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put it in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Destroy framebuffer while in-flight m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyFramebuffer-framebuffer-00892"); vkDestroyFramebuffer(m_device->device(), fb, NULL); m_errorMonitor->VerifyFound(); // Wait for queue to complete so we can safely destroy everything vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If framebuffer is not VK_NULL_HANDLE, framebuffer must be a valid VkFramebuffer handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Framebuffer obj"); vkDestroyFramebuffer(m_device->device(), fb, nullptr); } TEST_F(VkLayerTest, FramebufferImageInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use image that's child of framebuffer."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; VkResult err = VK_SUCCESS; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageCreateInfo image_ci = {}; image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_ci.pNext = NULL; image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.format = VK_FORMAT_B8G8R8A8_UNORM; image_ci.extent.width = 256; image_ci.extent.height = 256; image_ci.extent.depth = 1; image_ci.mipLevels = 1; image_ci.arrayLayers = 1; image_ci.samples = VK_SAMPLE_COUNT_1_BIT; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_ci.flags = 0; VkImage image; ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image)); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = 
m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image, VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Just use default renderpass with our framebuffer m_renderPassBeginInfo.framebuffer = fb; // Create Null cmd buffer for submit m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put it (and attached imageView) in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer to put framebuffer and children in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Destroy image attached to framebuffer while in-flight m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImage-image-01000"); vkDestroyImage(m_device->device(), image, NULL); m_errorMonitor->VerifyFound(); // Wait for queue to complete so we can safely destroy image and other objects vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If image is not VK_NULL_HANDLE, image must be a valid VkImage handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Image obj"); vkDestroyImage(m_device->device(), image, NULL); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); vkFreeMemory(m_device->device(), image_memory, nullptr); } TEST_F(VkLayerTest, ImageMemoryNotBound) { TEST_DESCRIPTION("Attempt to draw with an image which has not had memory bound to it."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Have to bind memory to image before recording cmd in cmd buffer using it VkMemoryRequirements mem_reqs; VkDeviceMemory image_mem; bool pass; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; 
mem_alloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem); ASSERT_VK_SUCCESS(err); // Introduce error, do not call vkBindImageMemory(m_device->device(), image, image_mem, 0); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindImageMemory()."); m_commandBuffer->begin(); VkClearColorValue ccv; ccv.float32[0] = 1.0f; ccv.float32[1] = 1.0f; ccv.float32[2] = 1.0f; ccv.float32[3] = 1.0f; VkImageSubresourceRange isr = {}; isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; isr.baseArrayLayer = 0; isr.baseMipLevel = 0; isr.layerCount = 1; isr.levelCount = 1; vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), image_mem, nullptr); } TEST_F(VkLayerTest, BufferMemoryNotBound) { TEST_DESCRIPTION("Attempt to copy from a buffer which has not had memory bound to it."); ASSERT_NO_FATAL_FAILURE(Init()); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkBuffer buffer; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; buf_info.size = 1024; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = 1024; bool pass = false; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); // Introduce failure by not calling vkBindBufferMemory(m_device->device(), buffer, mem, 0); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. 
Memory should be bound by calling vkBindBufferMemory()."); VkBufferImageCopy region = {}; region.bufferRowLength = 16; region.bufferImageHeight = 16; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageExtent.height = 4; region.imageExtent.width = 4; region.imageExtent.depth = 1; m_commandBuffer->begin(); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer, image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->handle(), mem, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferEventDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an event dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo evci = {}; evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; VkResult result = vkCreateEvent(m_device->device(), &evci, NULL, &event); ASSERT_VK_SUCCESS(result); m_commandBuffer->begin(); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Event "); // Destroy event dependency prior to submit to cause ERROR vkDestroyEvent(m_device->device(), event, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCmdBufferQueryPoolDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a query pool dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); VkQueryPool query_pool; VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_TIMESTAMP; qpci.queryCount = 1; VkResult result = vkCreateQueryPool(m_device->device(), &qpci, nullptr, &query_pool); ASSERT_VK_SUCCESS(result); m_commandBuffer->begin(); vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound QueryPool "); // Destroy query pool dependency prior to submit to cause ERROR vkDestroyQueryPool(m_device->device(), query_pool, NULL); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCmdBufferPipelineDestroyed) { TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a pipeline dependency being destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); { // Use helper to create graphics pipeline CreatePipelineHelper helper(*this); helper.InitInfo(); helper.InitState(); helper.CreateGraphicsPipeline(); // Bind helper pipeline to command buffer m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_); m_commandBuffer->end(); // pipeline will be destroyed when helper goes out of scope } // Cause error by submitting command buffer that references destroyed pipeline
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Pipeline "); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, DestroyPipelineRenderPass) { TEST_DESCRIPTION("Draw using a pipeline whose create renderPass has been destroyed."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkResult err; // Create a renderPass that's compatible with Draw-time renderPass VkAttachmentDescription att = {}; att.format = m_render_target_fmt; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference ref = {}; ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; ref.attachment = 0; m_renderPassClearValues.clear(); VkClearValue clear = {}; clear.color = m_clear_color; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &ref; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = NULL; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.attachmentCount = 1; rp_info.pAttachments = &att; rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; VkRenderPass rp; err = vkCreateRenderPass(device(), &rp_info, NULL, &rp); ASSERT_VK_SUCCESS(err); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {{0, 0}, {64, 64}}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); const VkPipelineLayoutObj pl(m_device); pipe.CreateVKPipeline(pl.handle(), rp); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Destroy renderPass before pipeline is used in Draw // We delay until after CmdBindPipeline to verify that invalid binding isn't // created between CB & renderPass, which we used to do. 
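// This is a positive test (ExpectSuccess above / VerifyNotFound below): destroying the
// renderPass at this point, after the pipeline is bound, should not generate validation errors.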
vkDestroyRenderPass(m_device->device(), rp, nullptr); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); } TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetBufferDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a bound descriptor set with a buffer dependency being " "destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Allocate memory and bind to buffer so we can make it to the appropriate // error VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = buffer; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &m_viewports[0]); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &m_scissors[0]); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer "); // Destroy buffer should invalidate the cmd buffer, causing error on submit vkDestroyBuffer(m_device->device(), buffer, NULL); // Attempt to submit cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetImageSamplerDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a bound descriptor sets with a combined image sampler having " "their image, sampler, and descriptor set each respectively destroyed and then attempting to submit associated cmd " "buffers. 
Attempt to destroy a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with VkImage image; VkImage image2; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for both images VkDeviceSize align_mod = memory_reqs.size % memory_reqs.alignment; VkDeviceSize aligned_size = ((align_mod == 0) ? 
memory_reqs.size : (memory_reqs.size + memory_reqs.alignment - align_mod)); memory_info.allocationSize = aligned_size * 2; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); // Bind second image to memory right after first image err = vkBindImageMemory(m_device->device(), image2, image_memory, aligned_size); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView tmp_view; // First test deletes this view VkImageView view; VkImageView view2; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &tmp_view); ASSERT_VK_SUCCESS(err); err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); image_view_create_info.image = image2; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view2); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkSampler sampler2; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler2); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = tmp_view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // First error case is destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); // Transit image layout from VK_IMAGE_LAYOUT_UNDEFINED into VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL VkImageMemoryBarrier barrier = {}; barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; barrier.newLayout = 
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.image = image; barrier.srcAccessMask = 0; barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; barrier.subresourceRange.baseMipLevel = 0; barrier.subresourceRange.levelCount = 1; barrier.subresourceRange.baseArrayLayer = 0; barrier.subresourceRange.layerCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // This first submit should be successful vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); // Now destroy imageview and reset cmdBuffer vkDestroyImageView(m_device->device(), tmp_view, NULL); m_commandBuffer->reset(0); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that has been destroyed."); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Re-update descriptor with new view img_info.imageView = view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Now test destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy sampler invalidates the cmd buffer, causing error on submit vkDestroySampler(m_device->device(), sampler, NULL); // Attempt to submit cmd buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound Sampler"); submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, 
&submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now re-update descriptor with valid sampler and delete image img_info.sampler = sampler2; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); VkCommandBufferBeginInfo info = {}; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image "); m_commandBuffer->begin(&info); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy image invalidates the cmd buffer, causing error on submit vkDestroyImage(m_device->device(), image, NULL); // Attempt to submit cmd buffer submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now update descriptor to be valid, but then free descriptor img_info.imageView = view2; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(&info); // Transit image2 layout from VK_IMAGE_LAYOUT_UNDEFINED into VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL barrier.image = image2; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Immediately try to destroy the descriptor set in the active command buffer - failure expected m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call vkFreeDescriptorSets() on descriptor set 0x"); vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); m_errorMonitor->VerifyFound(); // Try again once the queue is idle - should succeed w/o error // TODO - though the particular error above doesn't re-occur, there are other 'unexpecteds' still to clean up vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError( "pDescriptorSets must be a valid pointer to an array of descriptorSetCount VkDescriptorSet handles, each element of which " "must either be a valid handle or VK_NULL_HANDLE"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorSet obj"); vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); // Attempt to submit cmd buffer containing the freed descriptor set submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; 
submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DescriptorSet "); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), image_memory, NULL); vkDestroySampler(m_device->device(), sampler2, NULL); vkDestroyImage(m_device->device(), image2, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyImageView(m_device->device(), view2, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidDescriptorSetSamplerDestroyed) { TEST_DESCRIPTION("Attempt to draw with a bound descriptor sets with a combined image sampler where sampler has been deleted."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); // Create images to update the descriptor with VkImageObj image(m_device); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; VkResult err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Destroy the sampler before it's bound to the cmd buffer vkDestroySampler(m_device->device(), sampler, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, 
this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // First error case is destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " Descriptor in binding #0 at global descriptor index 0 is using sampler "); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, ImageDescriptorLayoutMismatchInternal) { TEST_DESCRIPTION("Create an image sampler layout->image layout mismatch within a command buffer"); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with const VkFormat format = VK_FORMAT_B8G8R8A8_UNORM; VkImageObj image(m_device); image.Init(32, 32, 1, format, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = 
VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; // This should cause a mis-match. Actual layout at use time is SHADER_RO img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkCommandBufferObj cmd_buf(m_device, m_commandPool); cmd_buf.begin(); // record layout different than actual descriptor layout of SHADER_RO image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); cmd_buf.BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport); vkCmdSetScissor(cmd_buf.handle(), 0, 1, &scissor); // At draw time the update layout will mis-match the actual layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageLayout-00344"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-DescriptorSetNotUpdated"); cmd_buf.Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); cmd_buf.EndRenderPass(); cmd_buf.end(); // Submit cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buf.handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, ImageDescriptorLayoutMismatchExternal) { TEST_DESCRIPTION("Create an image sampler layout->image layout mismatch external to a command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); 
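// The descriptor write below records VK_IMAGE_LAYOUT_GENERAL while the image itself stays in
// SHADER_READ_ONLY_OPTIMAL, so the layout mismatch can only be detected at queue submit time.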
OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkImageObj image(m_device); // Transition image to be used in shader to SHADER_READ_ONLY_OPTIMAL image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo image_info{}; // Set error condition -- anything but Shader_Read_Only_Optimal which is the current image layout image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); vkDestroyImageView(m_device->device(), view, NULL); 
vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkLayerTest, DescriptorPoolInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete a DescriptorPool with a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create image to update the descriptor with VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); 
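// Binding this descriptor set and submitting the command buffer below puts the pool's set
// in-flight, which is what makes the later vkDestroyDescriptorPool call an error.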
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put pool in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Destroy pool while in-flight, causing error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyDescriptorPool-descriptorPool-00303"); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, NULL); m_errorMonitor->SetUnexpectedError( "If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj"); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); // TODO : It seems Validation layers think ds_pool was already destroyed, even though it wasn't? } TEST_F(VkLayerTest, DescriptorPoolInUseResetSignaled) { TEST_DESCRIPTION("Reset a DescriptorPool with a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = nullptr; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, nullptr, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = nullptr; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create image to update the descriptor with VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, nullptr, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; 
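// Same in-flight setup as the destroy-pool case above, but this test instead attempts
// vkResetDescriptorPool while the set is still referenced by a submitted command buffer.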
img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, nullptr); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put pool in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Reset pool while in-flight, causing error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-00313"); vkResetDescriptorPool(m_device->device(), ds_pool, 0); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, nullptr); m_errorMonitor->SetUnexpectedError( "If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj"); vkDestroyDescriptorPool(m_device->device(), ds_pool, nullptr); } TEST_F(VkLayerTest, DescriptorImageUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt an image descriptor set update where image's bound memory has been freed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); 
ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Initially bind memory to avoid error at bind view time. We'll break binding before update. VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for image memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; 
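    // Note: the update below is the negative case this test exists for. The image's backing memory is freed
    // first, so the layer is expected to reject the combined-image-sampler write that still references it
    // (see the two SetDesiredFailureMsg() strings that follow).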
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = descriptorSet;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    descriptor_write.pImageInfo = &img_info;

    // Break memory binding and attempt update
    vkFreeMemory(m_device->device(), image_memory, nullptr);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " previously bound memory was freed. Memory must not be freed prior to this operation.");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x");
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    // Cleanup
    vkDestroyImage(m_device->device(), image, NULL);
    vkDestroySampler(m_device->device(), sampler, NULL);
    vkDestroyImageView(m_device->device(), view, NULL);
    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

TEST_F(VkLayerTest, InvalidPipeline) {
    uint64_t fake_pipeline_handle = 0xbaad6001;
    VkPipeline bad_pipeline = reinterpret_cast<VkPipeline &>(fake_pipeline_handle);
    // Enable VK_KHR_draw_indirect_count for KHR variants
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    bool has_khr_indirect = DeviceExtensionEnabled(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Attempt to bind an invalid Pipeline to a valid Command Buffer
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter");
    m_commandBuffer->begin();
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, bad_pipeline);
    m_errorMonitor->VerifyFound();

    // Try each of the 6 flavors of Draw()
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);  // Draw*() calls must be submitted within a renderpass

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-None-00442");
    m_commandBuffer->Draw(1, 0, 0, 0);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexed-None-00461");
    m_commandBuffer->DrawIndexed(1, 1, 0, 0, 0);
    m_errorMonitor->VerifyFound();

    VkBufferObj buffer;
    VkBufferCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    ci.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
    ci.size = 1024;
    buffer.init(*m_device, ci);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirect-None-00485");
    vkCmdDrawIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 1, 0);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirect-None-00537");
    vkCmdDrawIndexedIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 1, 0);
    m_errorMonitor->VerifyFound();

    if (has_khr_indirect) {
        auto fpCmdDrawIndirectCountKHR =
            (PFN_vkCmdDrawIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndirectCountKHR");
        ASSERT_NE(fpCmdDrawIndirectCountKHR, nullptr);
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-None-03119");
        // stride must be a multiple of 4 and must be greater than or equal to
sizeof(VkDrawIndirectCommand) fpCmdDrawIndirectCountKHR(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 512); m_errorMonitor->VerifyFound(); auto fpCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndexedIndirectCountKHR"); ASSERT_NE(fpCmdDrawIndexedIndirectCountKHR, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03151"); // stride must be a multiple of 4 and must be greater than or equal to sizeof(VkDrawIndexedIndirectCommand) fpCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 512); m_errorMonitor->VerifyFound(); } // Also try the Dispatch variants vkCmdEndRenderPass(m_commandBuffer->handle()); // Compute submissions must be outside a renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-None-00391"); vkCmdDispatch(m_commandBuffer->handle(), 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchIndirect-None-00404"); vkCmdDispatchIndirect(m_commandBuffer->handle(), buffer.handle(), 0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CmdDispatchExceedLimits) { TEST_DESCRIPTION("Compute dispatch with dimensions that exceed device limits"); // Enable KHX device group extensions, if available if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool khx_dg_ext_available = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME); khx_dg_ext_available = true; } ASSERT_NO_FATAL_FAILURE(InitState()); uint32_t x_limit = m_device->props.limits.maxComputeWorkGroupCount[0]; uint32_t y_limit = m_device->props.limits.maxComputeWorkGroupCount[1]; uint32_t z_limit = m_device->props.limits.maxComputeWorkGroupCount[2]; if (std::max({x_limit, y_limit, z_limit}) == UINT32_MAX) { printf("%s device maxComputeWorkGroupCount limit reports UINT32_MAX, test not possible, skipping.\n", kSkipPrefix); return; } // Create a minimal compute pipeline std::string cs_text = "#version 450\nvoid main() {}\n"; // minimal no-op shader VkShaderObj cs_obj(m_device, cs_text.c_str(), VK_SHADER_STAGE_COMPUTE_BIT, this); VkPipelineLayoutCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; info.pNext = nullptr; VkPipelineLayout pipe_layout; vkCreatePipelineLayout(device(), &info, nullptr, &pipe_layout); VkComputePipelineCreateInfo pipeline_info = {}; pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; pipeline_info.pNext = nullptr; pipeline_info.flags = khx_dg_ext_available ? 
VK_PIPELINE_CREATE_DISPATCH_BASE_KHR : 0; pipeline_info.layout = pipe_layout; pipeline_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_info.basePipelineIndex = -1; pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; pipeline_info.stage.pNext = nullptr; pipeline_info.stage.flags = 0; pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; pipeline_info.stage.module = cs_obj.handle(); pipeline_info.stage.pName = "main"; pipeline_info.stage.pSpecializationInfo = nullptr; VkPipeline cs_pipeline; vkCreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &cs_pipeline); // Bind pipeline to command buffer m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline); // Dispatch counts that exceed device limits m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountX-00386"); vkCmdDispatch(m_commandBuffer->handle(), x_limit + 1, y_limit, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountY-00387"); vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit + 1, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountZ-00388"); vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit, z_limit + 1); m_errorMonitor->VerifyFound(); if (khx_dg_ext_available) { PFN_vkCmdDispatchBaseKHR fp_vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)vkGetInstanceProcAddr(instance(), "vkCmdDispatchBaseKHR"); // Base equals or exceeds limit m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00421"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit, y_limit - 1, z_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00422"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit, z_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupZ-00423"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit - 1, z_limit, 0, 0, 0); m_errorMonitor->VerifyFound(); // (Base + count) exceeds limit uint32_t x_base = x_limit / 2; uint32_t y_base = y_limit / 2; uint32_t z_base = z_limit / 2; x_limit -= x_base; y_limit -= y_base; z_limit -= z_base; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountX-00424"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit + 1, y_limit, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountY-00425"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit + 1, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountZ-00426"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit, z_limit + 1); m_errorMonitor->VerifyFound(); } else { printf("%s KHX_DEVICE_GROUP_* extensions not supported, skipping CmdDispatchBaseKHR() tests.\n", kSkipPrefix); } // Clean up vkDestroyPipeline(device(), cs_pipeline, nullptr); vkDestroyPipelineLayout(device(), pipe_layout, nullptr); } 
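// Reviewer sketch (illustrative only, not referenced by any test): CmdDispatchExceedLimits above exercises the
// rule that vkCmdDispatch() group counts must stay within VkPhysicalDeviceLimits::maxComputeWorkGroupCount.
// A hypothetical application-side guard for that rule could look like this; the helper name is invented here.
#if 0
static bool DispatchCountsWithinLimits(const VkPhysicalDeviceLimits &limits, uint32_t x, uint32_t y, uint32_t z) {
    return (x <= limits.maxComputeWorkGroupCount[0]) && (y <= limits.maxComputeWorkGroupCount[1]) &&
           (z <= limits.maxComputeWorkGroupCount[2]);
}
#endif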
TEST_F(VkLayerTest, MultiplaneImageLayoutBadAspectFlags) { TEST_DESCRIPTION("Query layout of a multiplane image using illegal aspect flag masks"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_LINEAR; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image_2plane, image_3plane; ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; VkResult err = vkCreateImage(device(), &ci, NULL, &image_2plane); ASSERT_VK_SUCCESS(err); ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; err = vkCreateImage(device(), &ci, NULL, &image_3plane); ASSERT_VK_SUCCESS(err); // Query layout of 3rd plane, for a 2-plane image VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout = {}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01581"); vkGetImageSubresourceLayout(device(), image_2plane, &subres, &layout); m_errorMonitor->VerifyFound(); // Query layout using color aspect, for a 3-plane image subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01582"); vkGetImageSubresourceLayout(device(), image_3plane, &subres, &layout); m_errorMonitor->VerifyFound(); // Clean up vkDestroyImage(device(), image_2plane, NULL); vkDestroyImage(device(), image_3plane, NULL); } TEST_F(VkPositiveLayerTest, MultiplaneGetImageSubresourceLayout) { TEST_DESCRIPTION("Positive test, query layout of a single plane of a multiplane image. (repro Github #2530)"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_LINEAR; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify format bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image; VkResult err = vkCreateImage(device(), &ci, NULL, &image); ASSERT_VK_SUCCESS(err); // Query layout of 3rd plane VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout = {}; m_errorMonitor->ExpectSuccess(); vkGetImageSubresourceLayout(device(), image, &subres, &layout); m_errorMonitor->VerifyNotFound(); vkDestroyImage(device(), image, NULL); } TEST_F(VkLayerTest, InvalidBufferViewObject) { // Create a single TEXEL_BUFFER descriptor and send it an invalid bufferView // First, cause the bufferView to be invalid due to underlying buffer being destroyed // Then destroy view itself and verify that same error is hit VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); // Create a valid bufferView to start with VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferView view; VkBufferViewCreateInfo bvci = {}; bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; bvci.buffer = buffer; bvci.format = VK_FORMAT_R32_SFLOAT; bvci.range = 
VK_WHOLE_SIZE; err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view); ASSERT_VK_SUCCESS(err); // First Destroy buffer underlying view which should hit error in CV vkDestroyBuffer(m_device->device(), buffer, NULL); VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Now destroy view itself and verify same error, which is hit in PV this time vkDestroyBufferView(m_device->device(), view, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), buffer_memory, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, CreateBufferViewNoMemoryBoundToBuffer) { TEST_DESCRIPTION("Attempt to create a buffer view with a buffer that has no memory bound to it."); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); ASSERT_NO_FATAL_FAILURE(Init()); // Create a buffer with no bound memory and then attempt to create // a buffer view. VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; buff_ci.size = 256; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkBufferViewCreateInfo buff_view_ci = {}; buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; buff_view_ci.buffer = buffer; buff_view_ci.format = VK_FORMAT_R8_UNORM; buff_view_ci.range = VK_WHOLE_SIZE; VkBufferView buff_view; err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buff_view); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); // If last error is success, it still created the view, so delete it. if (err == VK_SUCCESS) { vkDestroyBufferView(m_device->device(), buff_view, NULL); } } TEST_F(VkLayerTest, InvalidBufferViewCreateInfoEntries) { TEST_DESCRIPTION("Attempt to create a buffer view with invalid create info."); ASSERT_NO_FATAL_FAILURE(Init()); const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits; const VkDeviceSize minTexelBufferOffsetAlignment = dev_limits.minTexelBufferOffsetAlignment; if (minTexelBufferOffsetAlignment == 1) { printf("%s Test requires minTexelOffsetAlignment to not be equal to 1. 
\n", kSkipPrefix); return; } const VkFormat format_with_uniform_texel_support = VK_FORMAT_R8G8B8A8_UNORM; const char *format_with_uniform_texel_support_string = "VK_FORMAT_R8G8B8A8_UNORM"; const VkFormat format_without_texel_support = VK_FORMAT_R8G8B8_UNORM; const char *format_without_texel_support_string = "VK_FORMAT_R8G8B8_UNORM"; VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), format_with_uniform_texel_support, &format_properties); if (!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) { printf("%s Test requires %s to support VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT\n", kSkipPrefix, format_with_uniform_texel_support_string); return; } vkGetPhysicalDeviceFormatProperties(gpu(), format_without_texel_support, &format_properties); if ((format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) || (format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) { printf( "%s Test requires %s to not support VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT nor " "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT\n", kSkipPrefix, format_without_texel_support_string); return; } // Create a test buffer--buffer must have been created using VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT or // VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, so use a different usage value instead to cause an error const VkDeviceSize resource_size = 1024; const VkBufferCreateInfo bad_buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT); VkBufferObj bad_buffer; bad_buffer.init(*m_device, bad_buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); // Create a test buffer view VkBufferViewCreateInfo buff_view_ci = {}; buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; buff_view_ci.buffer = bad_buffer.handle(); buff_view_ci.format = format_with_uniform_texel_support; buff_view_ci.range = VK_WHOLE_SIZE; auto CatchError = [this, &buff_view_ci](const string &desired_error_string) { VkBufferView buff_view; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_error_string); VkResult err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buff_view); m_errorMonitor->VerifyFound(); // If previous error is success, it still created the view, so delete it if (err == VK_SUCCESS) { vkDestroyBufferView(m_device->device(), buff_view, NULL); } }; CatchError("VUID-VkBufferViewCreateInfo-buffer-00932"); // Create a better test buffer const VkBufferCreateInfo buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT); VkBufferObj buffer; buffer.init(*m_device, buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); // Offset must be less than the size of the buffer, so set it equal to the buffer size to cause an error buff_view_ci.buffer = buffer.handle(); buff_view_ci.offset = buffer.create_info().size; CatchError("VUID-VkBufferViewCreateInfo-offset-00925"); // Offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment so add 1 to ensure it is not buff_view_ci.offset = minTexelBufferOffsetAlignment + 1; CatchError("VUID-VkBufferViewCreateInfo-offset-00926"); // Set offset to acceptable value for range tests buff_view_ci.offset = minTexelBufferOffsetAlignment; // Setting range equal to 0 will cause an error to occur buff_view_ci.range = 0; CatchError("VUID-VkBufferViewCreateInfo-range-00928"); uint32_t format_size = FormatElementSize(buff_view_ci.format); // Range must be a multiple of the 
element size of format, so add one to ensure it is not buff_view_ci.range = format_size + 1; CatchError("VUID-VkBufferViewCreateInfo-range-00929"); // Twice the element size of format multiplied by VkPhysicalDeviceLimits::maxTexelBufferElements guarantees range divided by the // element size is greater than maxTexelBufferElements, causing failure buff_view_ci.range = 2 * format_size * dev_limits.maxTexelBufferElements; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferViewCreateInfo-range-00930"); CatchError("VUID-VkBufferViewCreateInfo-offset-00931"); // Set rage to acceptable value for buffer tests buff_view_ci.format = format_without_texel_support; buff_view_ci.range = VK_WHOLE_SIZE; // `buffer` was created using VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT so we can use that for the first buffer test CatchError("VUID-VkBufferViewCreateInfo-buffer-00933"); // Create a new buffer using VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT const VkBufferCreateInfo storage_buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT); VkBufferObj storage_buffer; storage_buffer.init(*m_device, storage_buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); buff_view_ci.buffer = storage_buffer.handle(); CatchError("VUID-VkBufferViewCreateInfo-buffer-00934"); } TEST_F(VkLayerTest, InvalidDynamicOffsetCases) { // Create a descriptorSet w/ dynamic descriptor and then hit 3 offset error // cases: // 1. No dynamicOffset supplied // 2. Too many dynamicOffsets supplied // 3. Dynamic offset oversteps buffer being updated VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " requires 1 dynamicOffsets, but only 0 dynamicOffsets are left in pDynamicOffsets "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); // 
Allocate memory and bind to buffer so we can make it to the appropriate error VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), dyub, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), dyub, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub, mem, 0); ASSERT_VK_SUCCESS(err); // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = dyub; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); m_errorMonitor->VerifyFound(); uint32_t pDynOff[2] = {512, 756}; // Now cause error b/c too many dynOffsets in array for # of dyn descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Attempting to bind 1 descriptorSets with 1 dynamic descriptors, but "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 2, pDynOff); m_errorMonitor->VerifyFound(); // Finally cause error due to dynamicOffset being too big m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " dynamic offset 512 combined with offset 0 and range 1024 that oversteps the buffer size of 1024"); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // This update should succeed, but offset size of 512 will overstep buffer // /w range 1024 & size 1024 vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 1, pDynOff); m_commandBuffer->Draw(1, 0, 0, 
0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, DescriptorBufferUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt to update a descriptor with a non-sparse buffer that doesn't have memory bound"); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); // Attempt to update descriptor without binding memory to it VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = dyub; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidPushConstants) { ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineLayout pipeline_layout; VkPushConstantRange pc_range = {}; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = 
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pipeline_layout_ci.pushConstantRangeCount = 1;
    pipeline_layout_ci.pPushConstantRanges = &pc_range;

    //
    // Check for invalid push constant ranges in pipeline layouts.
    //
    struct PipelineLayoutTestCase {
        VkPushConstantRange const range;
        char const *msg;
    };

    const uint32_t too_big = m_device->props.limits.maxPushConstantsSize + 0x4;
    const std::array<PipelineLayoutTestCase, 10> range_tests = {{
        {{VK_SHADER_STAGE_VERTEX_BIT, 0, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."},
        {{VK_SHADER_STAGE_VERTEX_BIT, 0, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."},
        {{VK_SHADER_STAGE_VERTEX_BIT, 4, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."},
        {{VK_SHADER_STAGE_VERTEX_BIT, 4, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."},
        {{VK_SHADER_STAGE_VERTEX_BIT, 1, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset 1. Offset must"},
        {{VK_SHADER_STAGE_VERTEX_BIT, 0, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
        {{VK_SHADER_STAGE_VERTEX_BIT, too_big, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
        {{VK_SHADER_STAGE_VERTEX_BIT, too_big, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
        {{VK_SHADER_STAGE_VERTEX_BIT, 0xFFFFFFF0, 0x00000020}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
        {{VK_SHADER_STAGE_VERTEX_BIT, 0x00000020, 0xFFFFFFF0}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
    }};

    // Check for invalid offset and size
    for (const auto &iter : range_tests) {
        pc_range = iter.range;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg);
        vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
        m_errorMonitor->VerifyFound();
    }

    // Check for invalid stage flag
    pc_range.offset = 0;
    pc_range.size = 16;
    pc_range.stageFlags = 0;
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "vkCreatePipelineLayout: value of pCreateInfo->pPushConstantRanges[0].stageFlags must not be 0");
    vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // Check for duplicate stage flags in a list of push constant ranges.
    // A shader can only have one push constant block and that block is mapped
    // to the push constant range that has that shader's stage flag set.
    // The shader's stage flag can only appear once in all the ranges, so the
    // implementation can find the one and only range to map it to.
    const uint32_t ranges_per_test = 5;
    struct DuplicateStageFlagsTestCase {
        VkPushConstantRange const ranges[ranges_per_test];
        std::vector<std::string> const msg;
    };
    // Overlapping ranges are OK, but a stage flag can appear only once.
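    // For example, a VERTEX range and a FRAGMENT range may cover overlapping bytes (the positive
    // CmdPushConstants cases further down rely on exactly that), but listing VK_SHADER_STAGE_VERTEX_BIT in two
    // ranges is rejected, as the cases below show.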
    const std::array<DuplicateStageFlagsTestCase, 3> duplicate_stageFlags_tests = {
        {
            {{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}},
             {
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 1.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 2.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 4.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 2.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 3.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 4.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 3 and 4.",
             }},
            {{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4},
              {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4},
              {VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}},
             {
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.",
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.",
             }},
            {{{VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4},
              {VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, 0, 4},
              {VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
              {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}},
             {
                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.",
             }},
        },
    };

    for (const auto &iter : duplicate_stageFlags_tests) {
        pipeline_layout_ci.pPushConstantRanges = iter.ranges;
        pipeline_layout_ci.pushConstantRangeCount = ranges_per_test;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg.begin(), iter.msg.end());
        vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
        m_errorMonitor->VerifyFound();
    }

    //
    // CmdPushConstants tests
    //

    // Setup a pipeline layout with ranges: [0,32) [16,80)
    const std::vector<VkPushConstantRange> pc_range2 = {{VK_SHADER_STAGE_VERTEX_BIT, 16, 64},
                                                        {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 32}};
    const VkPipelineLayoutObj pipeline_layout_obj(m_device, {}, pc_range2);

    const uint8_t dummy_values[100] = {};

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

    // Check for invalid stage flag
    // Note that VU 00996 isn't reached due to parameter validation
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdPushConstants: value of stageFlags must not be 0");
    vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), 0, 0, 16, dummy_values);
    m_errorMonitor->VerifyFound();

    // Positive tests for the overlapping ranges
    m_errorMonitor->ExpectSuccess();
    vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, dummy_values);
    m_errorMonitor->VerifyNotFound();
    m_errorMonitor->ExpectSuccess();
    vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 32, 48, dummy_values);
    m_errorMonitor->VerifyNotFound();
    m_errorMonitor->ExpectSuccess();
    vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(),
                       VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16, dummy_values);
    m_errorMonitor->VerifyNotFound();

    // Wrong cmd stages for extant range
    // No range for all cmd stages -- "VUID-vkCmdPushConstants-offset-01795"
VUID-vkCmdPushConstants-offset-01795 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); // Missing cmd stages for found overlapping range -- "VUID-vkCmdPushConstants-offset-01796" VUID-vkCmdPushConstants-offset-01796 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_GEOMETRY_BIT, 0, 16, dummy_values); m_errorMonitor->VerifyFound(); // Wrong no extant range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 80, 4, dummy_values); m_errorMonitor->VerifyFound(); // Wrong overlapping extent m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 20, dummy_values); m_errorMonitor->VerifyFound(); // Wrong stage flags for valid overlapping range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 16, 16, dummy_values); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DescriptorSetCompatibility) { // Test various desriptorSet errors with bad binding combinations using std::vector; VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const uint32_t NUM_DESCRIPTOR_TYPES = 5; VkDescriptorPoolSize ds_type_count[NUM_DESCRIPTOR_TYPES] = {}; ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count[0].descriptorCount = 10; ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; ds_type_count[1].descriptorCount = 2; ds_type_count[2].type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; ds_type_count[2].descriptorCount = 2; ds_type_count[3].type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count[3].descriptorCount = 5; // TODO : LunarG ILO driver currently asserts in desc.c w/ INPUT_ATTACHMENT // type // ds_type_count[4].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; ds_type_count[4].type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; ds_type_count[4].descriptorCount = 2; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 5; ds_pool_ci.poolSizeCount = NUM_DESCRIPTOR_TYPES; ds_pool_ci.pPoolSizes = ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); static const uint32_t MAX_DS_TYPES_IN_LAYOUT = 2; VkDescriptorSetLayoutBinding dsl_binding[MAX_DS_TYPES_IN_LAYOUT] = {}; dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[0].descriptorCount = 5; dsl_binding[0].stageFlags = VK_SHADER_STAGE_ALL; dsl_binding[0].pImmutableSamplers = NULL; // Create layout identical to set0 layout but w/ different stageFlags VkDescriptorSetLayoutBinding dsl_fs_stage_only = {}; dsl_fs_stage_only.binding = 0; dsl_fs_stage_only.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_fs_stage_only.descriptorCount = 5; 
    dsl_fs_stage_only.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;  // Different stageFlags to cause error at
                                                                  // bind time
    dsl_fs_stage_only.pImmutableSamplers = NULL;

    vector<VkDescriptorSetLayoutObj> ds_layouts;
    // Create 4 unique layouts for full pipelineLayout, and 1 special fs-only
    // layout for error case
    ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0]));

    const VkDescriptorSetLayoutObj ds_layout_fs_only(m_device, {dsl_fs_stage_only});

    dsl_binding[0].binding = 0;
    dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
    dsl_binding[0].descriptorCount = 2;
    dsl_binding[1].binding = 1;
    dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
    dsl_binding[1].descriptorCount = 2;
    dsl_binding[1].stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding[1].pImmutableSamplers = NULL;
    ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>({dsl_binding[0], dsl_binding[1]}));

    dsl_binding[0].binding = 0;
    dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dsl_binding[0].descriptorCount = 5;
    ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0]));

    dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    dsl_binding[0].descriptorCount = 2;
    ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0]));

    const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts);

    static const uint32_t NUM_SETS = 4;
    VkDescriptorSet descriptorSet[NUM_SETS] = {};
    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorPool = ds_pool;
    alloc_info.descriptorSetCount = ds_vk_layouts.size();
    alloc_info.pSetLayouts = ds_vk_layouts.data();
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptorSet);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSet ds0_fs_only = {};
    alloc_info.descriptorSetCount = 1;
    alloc_info.pSetLayouts = &ds_layout_fs_only.handle();
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &ds0_fs_only);
    ASSERT_VK_SUCCESS(err);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layouts[0], &ds_layouts[1]});
    // Create pipelineLayout with only one setLayout
    const VkPipelineLayoutObj single_pipe_layout(m_device, {&ds_layouts[0]});
    // Create pipelineLayout with 2 descriptor setLayout at index 0
    const VkPipelineLayoutObj pipe_layout_one_desc(m_device, {&ds_layouts[3]});
    // Create pipelineLayout with 5 SAMPLER descriptor setLayout at index 0
    const VkPipelineLayoutObj pipe_layout_five_samp(m_device, {&ds_layouts[2]});
    // Create pipelineLayout with UB type, but stageFlags for FS only
    VkPipelineLayoutObj pipe_layout_fs_only(m_device, {&ds_layout_fs_only});
    // Create pipelineLayout w/ incompatible set0 layout, but set1 is fine
    const VkPipelineLayoutObj pipe_layout_bad_set0(m_device, {&ds_layout_fs_only, &ds_layouts[1]});

    // Create PSO to be used for draw-time errors below
    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "   gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) out vec4 x;\n"
        "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
        "void main(){\n"
        "   x = vec4(bar.y);\n"
        "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.CreateVKPipeline(pipe_layout_fs_only.handle(), renderPass());

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vkCmdBindPipeline(m_commandBuffer->handle(),
VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // TODO : Want to cause various binding incompatibility issues here to test // DrawState // First cause various verify_layout_compatibility() fails // Second disturb early and late sets and verify INFO msgs // VerifySetLayoutCompatibility fail cases: // 1. invalid VkPipelineLayout (layout) passed into vkCmdBindDescriptorSets m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-layout-parameter"); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, (VkPipelineLayout)((size_t)0xbaadb1be), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 2. layoutIndex exceeds # of layouts in layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " attempting to bind set to index 1"); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, single_pipe_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 3. Pipeline setLayout[0] has 2 descriptors, but set being bound has 5 // descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has 2 descriptors, but DescriptorSetLayout "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_one_desc.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 4. same # of descriptors but mismatch in type m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is type 'VK_DESCRIPTOR_TYPE_SAMPLER' but binding "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_five_samp.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 5. same # of descriptors but mismatch in stageFlags m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has stageFlags 16 but binding 0 for DescriptorSetLayout "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_fs_only.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // Now that we're done actively using the pipelineLayout that gfx pipeline // was created with, we should be able to delete it. Do that now to verify // that validation obeys pipelineLayout lifetime pipe_layout_fs_only.Reset(); // Cause draw-time errors due to PSO incompatibilities // 1. Error due to not binding required set (we actually use same code as // above to disturb set0) vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_bad_set0.handle(), 1, 1, &descriptorSet[1], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " uses set #0 but that set is not bound."); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // 2. 
Error due to bound set not being compatible with PSO's // VkPipelineLayout (diff stageFlags in this case) vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " bound as set #0 is not compatible with "); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Remaining clean-up m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, NoBeginCommandBuffer) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You must call vkBeginCommandBuffer() before this call to "); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj commandBuffer(m_device, m_commandPool); // Call EndCommandBuffer() w/o calling BeginCommandBuffer() vkEndCommandBuffer(commandBuffer.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferNullRenderpass) { ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj cb(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); // Force the failure by not setting the Renderpass and Framebuffer fields VkCommandBufferInheritanceInfo cmd_buf_hinfo = {}; cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCommandBufferBeginInfo-flags-00053"); vkBeginCommandBuffer(cb.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedExplicitReset) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded"); // A pool we can reset in. VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); // rerecording of secondary secondary.reset(); // explicit reset here. secondary.begin(); secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedNoReset) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded"); // A pool we can reset in. 
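    // (VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT is what allows secondary.begin() below to perform its
    // implicit reset; rerecording the secondary is what invalidates the primary that already executed it.)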
VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); // rerecording of secondary secondary.begin(); // implicit reset in begin secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CascadedInvalidation) { ASSERT_NO_FATAL_FAILURE(Init()); VkEventCreateInfo eci = {VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0}; VkEvent event; vkCreateEvent(m_device->device(), &eci, nullptr, &event); VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); vkCmdSetEvent(secondary.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_commandBuffer->end(); // destroying the event should invalidate both primary and secondary CB vkDestroyEvent(m_device->device(), event, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "invalid because bound Event"); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CommandBufferResetErrors) { // Cause error due to Begin while recording CB // Then cause 2 errors for attempting to reset CB w/o having // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT set for the pool from // which CBs were allocated. Note that this bit is off by default. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call Begin on command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); // Calls AllocateCommandBuffers VkCommandBufferObj commandBuffer(m_device, m_commandPool); // Force the failure by setting the Renderpass and Framebuffer fields with (fake) data VkCommandBufferInheritanceInfo cmd_buf_hinfo = {}; cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo; // Begin CB to transition to recording state vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); // Can't re-begin. 
This should trigger error vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetCommandBuffer-commandBuffer-00046"); VkCommandBufferResetFlags flags = 0; // Don't care about flags for this test // Reset attempt will trigger error due to incorrect CommandPool state vkResetCommandBuffer(commandBuffer.handle(), flags); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBeginCommandBuffer-commandBuffer-00050"); // Transition CB to RECORDED state vkEndCommandBuffer(commandBuffer.handle()); // Now attempting to Begin will implicitly reset, which triggers error vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidPipelineCreateState) { // Attempt to Create Gfx Pipeline w/o a VS VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo State: Vertex Shader required"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); VkPipelineRasterizationStateCreateInfo rs_state_ci = {}; rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci.polygonMode = VK_POLYGON_MODE_FILL; rs_state_ci.cullMode = VK_CULL_MODE_BACK_BIT; rs_state_ci.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rs_state_ci.depthClampEnable = VK_FALSE; rs_state_ci.rasterizerDiscardEnable = VK_TRUE; rs_state_ci.depthBiasEnable = VK_FALSE; rs_state_ci.lineWidth = 1.0f; VkPipelineVertexInputStateCreateInfo vi_ci = {}; vi_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vi_ci.pNext = nullptr; vi_ci.vertexBindingDescriptionCount = 0; vi_ci.pVertexBindingDescriptions = nullptr; vi_ci.vertexAttributeDescriptionCount = 0; vi_ci.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo ia_ci = {}; ia_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineShaderStageCreateInfo shaderStages[2]; memset(&shaderStages, 0, 2 * sizeof(VkPipelineShaderStageCreateInfo)); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj 
fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    shaderStages[0] = fs.GetStageCreateInfo();  // should be: vs.GetStageCreateInfo();
    shaderStages[1] = fs.GetStageCreateInfo();

    VkGraphicsPipelineCreateInfo gp_ci = {};
    gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    gp_ci.pViewportState = nullptr;  // no viewport b/c rasterizer is disabled
    gp_ci.pRasterizationState = &rs_state_ci;
    gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
    gp_ci.layout = pipeline_layout.handle();
    gp_ci.renderPass = renderPass();
    gp_ci.pVertexInputState = &vi_ci;
    gp_ci.pInputAssemblyState = &ia_ci;
    gp_ci.stageCount = 1;
    gp_ci.pStages = shaderStages;

    VkPipelineCacheCreateInfo pc_ci = {};
    pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    pc_ci.initialDataSize = 0;
    pc_ci.pInitialData = 0;

    VkPipeline pipeline;
    VkPipelineCache pipelineCache;
    err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL, &pipelineCache);
    ASSERT_VK_SUCCESS(err);

    err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline);
    m_errorMonitor->VerifyFound();

    // Finally, check the string validation for the shader stage pName variable.  Correct the shader stage data, and bork the
    // string before calling again
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains invalid characters or is badly formed");
    shaderStages[0] = vs.GetStageCreateInfo();
    const uint8_t cont_char = 0xf8;
    char bad_string[] = {static_cast<char>(cont_char), static_cast<char>(cont_char), static_cast<char>(cont_char),
                         static_cast<char>(cont_char)};
    shaderStages[0].pName = bad_string;
    err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline);
    m_errorMonitor->VerifyFound();

    vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL);
    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureDisable) {
    // Enable sample shading in pipeline when the feature is disabled.
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Disable sampleRateShading here
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    device_features.sampleRateShading = VK_FALSE;

    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Cause the error by enabling sample shading...
    auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE; };
    CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                      "VUID-VkPipelineMultisampleStateCreateInfo-sampleShadingEnable-00784");
}

TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureEnable) {
    // With sampleRateShading enabled, probe the valid [0.0, 1.0] range of minSampleShading.
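    /* minSampleShading is only consumed when sampleShadingEnable is VK_TRUE, and it must fall in [0.0, 1.0].
       NearestSmaller/NearestGreater are not defined in this excerpt; the sketch below is an assumption of what
       such helpers typically look like (std::nextafter-based), included only to make the range probing concrete:

           #include <cmath>
           float NearestSmaller(float v) { return std::nextafter(v, -INFINITY); }  // closest representable float below v
           float NearestGreater(float v) { return std::nextafter(v,  INFINITY); }  // closest representable float above v

       With that, NearestSmaller(0.0F) and NearestGreater(1.0F) are the tightest out-of-range inputs (expected to
       trigger ...-minSampleShading-00786), while 0.0F and 1.0F are the in-range positive cases. */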
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Require sampleRateShading here
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    if (device_features.sampleRateShading == VK_FALSE) {
        printf("%s SampleRateShading feature is disabled -- skipping related checks.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    auto range_test = [this](float value, bool positive_test) {
        auto info_override = [value](CreatePipelineHelper &helper) {
            helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE;
            helper.pipe_ms_state_ci_.minSampleShading = value;
        };
        CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                          "VUID-VkPipelineMultisampleStateCreateInfo-minSampleShading-00786", positive_test);
    };

    range_test(NearestSmaller(0.0F), false);
    range_test(NearestGreater(1.0F), false);
    range_test(0.0F, /* positive_test= */ true);
    range_test(1.0F, /* positive_test= */ true);
}

TEST_F(VkLayerTest, InvalidPipelineSamplePNext) {
    // Exercise the pNext chain of VkPipelineMultisampleStateCreateInfo with valid extension structs and an invalid one.

    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Set up the extension structs
    auto sampleLocations = chain_util::Init<VkPipelineSampleLocationsStateCreateInfoEXT>();
    sampleLocations.sampleLocationsInfo.sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT;
    auto coverageToColor = chain_util::Init<VkPipelineCoverageToColorStateCreateInfoNV>();
    auto coverageModulation = chain_util::Init<VkPipelineCoverageModulationStateCreateInfoNV>();

    auto discriminatrix = [this](const char *name) { return DeviceExtensionSupported(gpu(), nullptr, name); };
    chain_util::ExtensionChain chain(discriminatrix, &m_device_extension_names);
    chain.Add(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME, sampleLocations);
    chain.Add(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME, coverageToColor);
    chain.Add(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME, coverageModulation);
    const void *extension_head = chain.Head();

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    if (extension_head) {
        auto good_chain = [extension_head](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = extension_head; };
        CreatePipelineHelper::OneshotTest(*this, good_chain, (VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT),
                                          "No error", true);
    } else {
        printf("%s Required extension not present -- skipping positive checks.\n", kSkipPrefix);
    }

    auto instance_ci = chain_util::Init<VkInstanceCreateInfo>();
    auto bad_chain = [&instance_ci](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = &instance_ci; };
    CreatePipelineHelper::OneshotTest(*this, bad_chain, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                      "VUID-VkPipelineMultisampleStateCreateInfo-pNext-pNext");
}

TEST_F(VkLayerTest, VertexAttributeDivisorExtension) {
    TEST_DESCRIPTION("Test VUIDs added with VK_EXT_vertex_attribute_divisor extension.");

    bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if (inst_ext) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    }
    if (inst_ext && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME);
    }
else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); return; } VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = {}; vadf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT; vadf.vertexAttributeInstanceRateDivisor = VK_TRUE; vadf.vertexAttributeInstanceRateZeroDivisor = VK_TRUE; VkPhysicalDeviceFeatures2 pd_features2 = {}; pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; pd_features2.pNext = &vadf; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits; VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT pdvad_props = {}; pdvad_props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT; VkPhysicalDeviceProperties2 pd_props2 = {}; pd_props2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2; pd_props2.pNext = &pdvad_props; vkGetPhysicalDeviceProperties2(gpu(), &pd_props2); VkVertexInputBindingDivisorDescriptionEXT vibdd = {}; VkPipelineVertexInputDivisorStateCreateInfoEXT pvids_ci = {}; pvids_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT; pvids_ci.vertexBindingDivisorCount = 1; pvids_ci.pVertexBindingDivisors = &vibdd; VkVertexInputBindingDescription vibd = {}; vibd.stride = 12; vibd.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; using std::vector; struct TestCase { uint32_t div_binding; uint32_t div_divisor; uint32_t desc_binding; VkVertexInputRate desc_rate; vector vuids; }; // clang-format off vector test_cases = { { 0, 1, 0, VK_VERTEX_INPUT_RATE_VERTEX, {"VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871"} }, { dev_limits.maxVertexInputBindings + 1, 1, 0, VK_VERTEX_INPUT_RATE_INSTANCE, {"VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869", "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871"} } }; if (UINT32_MAX != pdvad_props.maxVertexAttribDivisor) { // Can't test overflow if maxVAD is UINT32_MAX test_cases.push_back( { 0, pdvad_props.maxVertexAttribDivisor + 1, 0, VK_VERTEX_INPUT_RATE_INSTANCE, {"VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870"} } ); } // clang-format on for (const auto &test_case : test_cases) { const auto bad_divisor_state = [&test_case, &vibdd, &pvids_ci, &vibd](CreatePipelineHelper &helper) { vibdd.binding = test_case.div_binding; vibdd.divisor = test_case.div_divisor; vibd.binding = test_case.desc_binding; vibd.inputRate = test_case.desc_rate; helper.vi_ci_.pNext = &pvids_ci; helper.vi_ci_.vertexBindingDescriptionCount = 1; helper.vi_ci_.pVertexBindingDescriptions = &vibd; }; CreatePipelineHelper::OneshotTest(*this, bad_divisor_state, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } TEST_F(VkLayerTest, VertexAttributeDivisorDisabled) { TEST_DESCRIPTION("Test instance divisor feature disabled for VK_EXT_vertex_attribute_divisor extension."); bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (inst_ext) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); } if (inst_ext && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, 
VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); return; } VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = {}; vadf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT; vadf.vertexAttributeInstanceRateDivisor = VK_FALSE; vadf.vertexAttributeInstanceRateZeroDivisor = VK_FALSE; VkPhysicalDeviceFeatures2 pd_features2 = {}; pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; pd_features2.pNext = &vadf; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDivisorDescriptionEXT vibdd = {}; vibdd.binding = 0; vibdd.divisor = 2; VkPipelineVertexInputDivisorStateCreateInfoEXT pvids_ci = {}; pvids_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT; pvids_ci.vertexBindingDivisorCount = 1; pvids_ci.pVertexBindingDivisors = &vibdd; VkVertexInputBindingDescription vibd = {}; vibd.binding = vibdd.binding; vibd.stride = 12; vibd.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE; const auto instance_rate = [&pvids_ci, &vibd](CreatePipelineHelper &helper) { helper.vi_ci_.pNext = &pvids_ci; helper.vi_ci_.vertexBindingDescriptionCount = 1; helper.vi_ci_.pVertexBindingDescriptions = &vibd; }; CreatePipelineHelper::OneshotTest(*this, instance_rate, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229"); } TEST_F(VkLayerTest, VertexAttributeDivisorInstanceRateZero) { TEST_DESCRIPTION("Test instanceRateZero feature of VK_EXT_vertex_attribute_divisor extension."); bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (inst_ext) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); } if (inst_ext && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); return; } VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = {}; vadf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT; vadf.vertexAttributeInstanceRateDivisor = VK_TRUE; vadf.vertexAttributeInstanceRateZeroDivisor = VK_FALSE; VkPhysicalDeviceFeatures2 pd_features2 = {}; pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; pd_features2.pNext = &vadf; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDivisorDescriptionEXT vibdd = {}; vibdd.binding = 0; vibdd.divisor = 0; VkPipelineVertexInputDivisorStateCreateInfoEXT pvids_ci = {}; pvids_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT; pvids_ci.vertexBindingDivisorCount = 1; pvids_ci.pVertexBindingDivisors = &vibdd; VkVertexInputBindingDescription vibd = {}; vibd.binding = vibdd.binding; vibd.stride = 12; vibd.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE; const auto instance_rate = [&pvids_ci, &vibd](CreatePipelineHelper &helper) { helper.vi_ci_.pNext = &pvids_ci; helper.vi_ci_.vertexBindingDescriptionCount = 1; helper.vi_ci_.pVertexBindingDescriptions = &vibd; }; CreatePipelineHelper::OneshotTest( *this, instance_rate, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228"); } /*// 
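   Background for the disabled test that follows: VkPipelineTessellationStateCreateInfo::patchControlPoints must be
   greater than zero and no larger than VkPhysicalDeviceLimits::maxTessellationPatchSize when the topology is
   VK_PRIMITIVE_TOPOLOGY_PATCH_LIST. A conforming setup would look roughly like this (values illustrative):

       VkPipelineTessellationStateCreateInfo tess = {};
       tess.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
       tess.patchControlPoints = 3;   // > 0 and <= limits.maxTessellationPatchSize

   The commented-out test below instead sets patchControlPoints = 0 to provoke the error.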
TODO : This test should be good, but needs Tess support in compiler to run TEST_F(VkLayerTest, InvalidPatchControlPoints) { // Attempt to Create Gfx Pipeline w/o a VS VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH primitive "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), VK_DESCRIPTOR_POOL_USAGE_NON_FREE, 1, &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &dsl_binding; VkDescriptorSetLayout ds_layout; err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorSet descriptorSet; err = vkAllocateDescriptorSets(m_device->device(), ds_pool, VK_DESCRIPTOR_SET_USAGE_NON_FREE, 1, &ds_layout, &descriptorSet); ASSERT_VK_SUCCESS(err); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout; err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); ASSERT_VK_SUCCESS(err); VkPipelineShaderStageCreateInfo shaderStages[3]; memset(&shaderStages, 0, 3 * sizeof(VkPipelineShaderStageCreateInfo)); VkShaderObj vs(m_device,bindStateVertShaderText,VK_SHADER_STAGE_VERTEX_BIT, this); // Just using VS txt for Tess shaders as we don't care about functionality VkShaderObj tc(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj te(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; shaderStages[0].shader = vs.handle(); shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[1].stage = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT; shaderStages[1].shader = tc.handle(); shaderStages[2].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[2].stage = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; shaderStages[2].shader = te.handle(); VkPipelineInputAssemblyStateCreateInfo iaCI = {}; iaCI.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; iaCI.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; VkPipelineTessellationStateCreateInfo tsCI = {}; tsCI.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tsCI.patchControlPoints = 0; // This will cause an error VkGraphicsPipelineCreateInfo gp_ci = {}; gp_ci.sType = 
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci.pNext = NULL; gp_ci.stageCount = 3; gp_ci.pStages = shaderStages; gp_ci.pVertexInputState = NULL; gp_ci.pInputAssemblyState = &iaCI; gp_ci.pTessellationState = &tsCI; gp_ci.pViewportState = NULL; gp_ci.pRasterizationState = NULL; gp_ci.pMultisampleState = NULL; gp_ci.pDepthStencilState = NULL; gp_ci.pColorBlendState = NULL; gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci.layout = pipeline_layout; gp_ci.renderPass = renderPass(); VkPipelineCacheCreateInfo pc_ci = {}; pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; pc_ci.pNext = NULL; pc_ci.initialSize = 0; pc_ci.initialData = 0; pc_ci.maxSize = 0; VkPipeline pipeline; VkPipelineCache pipelineCache; err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL, &pipelineCache); ASSERT_VK_SUCCESS(err); err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline); m_errorMonitor->VerifyFound(); vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } */ TEST_F(VkLayerTest, PSOViewportStateTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for non-multiViewport"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto break_vp_state = [](CreatePipelineHelper &helper) { helper.rs_state_ci_.rasterizerDiscardEnable = VK_FALSE; helper.gp_ci_.pViewportState = nullptr; }; CreatePipelineHelper::OneshotTest(*this, break_vp_state, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750"); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; // test viewport and scissor arrays using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector vuids; }; vector test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", 
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, nullptr, 1, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {1, viewports, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {1, nullptr, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; for (const auto &test_case : test_cases) { const auto break_vp = [&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } vector dyn_test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.viewportCount = 
test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } // Set Extension dynamic states without enabling the required Extensions. TEST_F(VkLayerTest, ExtensionDynamicStatesSetWOExtensionEnabled) { TEST_DESCRIPTION("Create a graphics pipeline with Extension dynamic states without enabling the required Extensions."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); using std::vector; struct TestCase { uint32_t dynamic_state_count; VkDynamicState dynamic_state; char const *errmsg; }; vector dyn_test_cases = { {1, VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, "contains VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, but VK_NV_clip_space_w_scaling"}, {1, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, "contains VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, but VK_EXT_discard_rectangles"}, {1, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, "contains VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but VK_EXT_sample_locations"}, }; for (const auto &test_case : dyn_test_cases) { VkDynamicState state[1]; state[0] = test_case.dynamic_state; const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = test_case.dynamic_state_count; dyn_state_ci.pDynamicStates = state; helper.dyn_state_ci_ = dyn_state_ci; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.errmsg); } } TEST_F(VkLayerTest, PSOViewportStateMultiViewportTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for multiViewport feature"); ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } // at least 16 viewports supported from here on ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector vuids; }; vector test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {2, nullptr, 2, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {2, viewports, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; const auto 
max_viewports = m_device->phy().properties().limits.maxViewports; const bool max_viewports_maxxed = max_viewports == std::numeric_limits::max(); if (max_viewports_maxxed) { printf("%s VkPhysicalDeviceLimits::maxViewports is UINT32_MAX -- skipping part of test requiring to exceed maxViewports.\n", kSkipPrefix); } else { const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}); test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); test_cases.push_back( {too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); } for (const auto &test_case : test_cases) { const auto break_vp = [&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } vector dyn_test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; if (!max_viewports_maxxed) { const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr dyn_test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219"}}); } const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ 
= dyn_state_ci; helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } TEST_F(VkLayerTest, DynViewportAndScissorUndefinedDrawState) { TEST_DESCRIPTION("Test viewport and scissor dynamic state that is not set before draw"); ASSERT_NO_FATAL_FAILURE(Init()); // TODO: should also test on !multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiple viewports/scissors; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device); VkPipelineObj pipeline_dyn_vp(m_device); pipeline_dyn_vp.AddShader(&vs); pipeline_dyn_vp.AddShader(&fs); pipeline_dyn_vp.AddDefaultColorAttachment(); pipeline_dyn_vp.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT); pipeline_dyn_vp.SetScissor(m_scissors); ASSERT_VK_SUCCESS(pipeline_dyn_vp.CreateVKPipeline(pipeline_layout.handle(), m_renderPass)); VkPipelineObj pipeline_dyn_sc(m_device); pipeline_dyn_sc.AddShader(&vs); pipeline_dyn_sc.AddShader(&fs); pipeline_dyn_sc.AddDefaultColorAttachment(); pipeline_dyn_sc.SetViewport(m_viewports); pipeline_dyn_sc.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR); ASSERT_VK_SUCCESS(pipeline_dyn_sc.CreateVKPipeline(pipeline_layout.handle(), m_renderPass)); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic viewport(s) 0 are used by pipeline state object, "); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_vp.handle()); vkCmdSetViewport(m_commandBuffer->handle(), 1, 1, &m_viewports[0]); // Forgetting to set needed 0th viewport (PSO viewportCount == 1) m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic scissor(s) 0 are used by pipeline state object, "); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_sc.handle()); vkCmdSetScissor(m_commandBuffer->handle(), 1, 1, &m_scissors[0]); // Forgetting to set needed 0th scissor (PSO scissorCount == 1) m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, PSOLineWidthInvalid) { TEST_DESCRIPTION("Test non-1.0 lineWidth errors when pipeline is created and in vkCmdSetLineWidth"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo shader_state_cis[] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; VkPipelineVertexInputStateCreateInfo vi_state_ci = {}; vi_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; VkPipelineInputAssemblyStateCreateInfo ia_state_ci = {}; ia_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; 
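    /* A note on the widths probed below: this test initializes VkPhysicalDeviceFeatures to all-zero, so wideLines is
       disabled and the only legal lineWidth is exactly 1.0, both at pipeline creation time and in vkCmdSetLineWidth.
       If wide lines were wanted instead, the feature would have to be requested at device creation, roughly (sketch,
       assuming the device actually offers it):

           VkPhysicalDeviceFeatures wanted = {};
           wanted.wideLines = VK_TRUE;   // widths other than 1.0 then become legal;
                                         // values outside limits.lineWidthRange get clamped by the implementation
    */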
    ia_state_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;

    VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
    VkRect2D scissor = {{0, 0}, {64, 64}};
    VkPipelineViewportStateCreateInfo vp_state_ci = {};
    vp_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    vp_state_ci.viewportCount = 1;
    vp_state_ci.pViewports = &viewport;
    vp_state_ci.scissorCount = 1;
    vp_state_ci.pScissors = &scissor;

    VkPipelineRasterizationStateCreateInfo rs_state_ci = {};
    rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rs_state_ci.rasterizerDiscardEnable = VK_FALSE;
    // lineWidth to be set by checks

    VkPipelineMultisampleStateCreateInfo ms_state_ci = {};
    ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;  // must match subpass att.

    VkPipelineColorBlendAttachmentState cba_state = {};

    VkPipelineColorBlendStateCreateInfo cb_state_ci = {};
    cb_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
    cb_state_ci.attachmentCount = 1;  // must match count in subpass
    cb_state_ci.pAttachments = &cba_state;

    const VkPipelineLayoutObj pipeline_layout(m_device);

    VkGraphicsPipelineCreateInfo gp_ci = {};
    gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    gp_ci.stageCount = sizeof(shader_state_cis) / sizeof(VkPipelineShaderStageCreateInfo);
    gp_ci.pStages = shader_state_cis;
    gp_ci.pVertexInputState = &vi_state_ci;
    gp_ci.pInputAssemblyState = &ia_state_ci;
    gp_ci.pViewportState = &vp_state_ci;
    gp_ci.pRasterizationState = &rs_state_ci;
    gp_ci.pMultisampleState = &ms_state_ci;
    gp_ci.pColorBlendState = &cb_state_ci;
    gp_ci.layout = pipeline_layout.handle();
    gp_ci.renderPass = renderPass();
    gp_ci.subpass = 0;

    const std::vector<float> test_cases = {-1.0f, 0.0f, NearestSmaller(1.0f), NearestGreater(1.0f), NAN};

    // test VkPipelineRasterizationStateCreateInfo::lineWidth
    for (const auto test_case : test_cases) {
        rs_state_ci.lineWidth = test_case;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00749");
        VkPipeline pipeline;
        vkCreateGraphicsPipelines(m_device->device(), VK_NULL_HANDLE, 1, &gp_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // test vkCmdSetLineWidth
    m_commandBuffer->begin();

    for (const auto test_case : test_cases) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetLineWidth-lineWidth-00788");
        vkCmdSetLineWidth(m_commandBuffer->handle(), test_case);
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_binding_00618) {
    TEST_DESCRIPTION(
        "Test VUID-VkVertexInputBindingDescription-binding-00618: binding must be less than "
        "VkPhysicalDeviceLimits::maxVertexInputBindings");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineCache pipeline_cache;
    {
        VkPipelineCacheCreateInfo create_info{};
        create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;

        VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache);
        ASSERT_VK_SUCCESS(err);
    }

    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineShaderStageCreateInfo stages[2]{{}};
    stages[0] = vs.GetStageCreateInfo();
    stages[1] = fs.GetStageCreateInfo();

    // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings.
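    /* Sketch of the constraint being exercised: binding numbers are valid only when strictly less than
       VkPhysicalDeviceLimits::maxVertexInputBindings, e.g. a conforming description would be

           VkVertexInputBindingDescription ok{};
           ok.binding = 0;                         // any value < limits.maxVertexInputBindings
           ok.stride = 12;
           ok.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;

       The description below instead uses maxVertexInputBindings itself -- the smallest out-of-range value --
       to trigger VUID-VkVertexInputBindingDescription-binding-00618. */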
VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.binding = m_device->props.limits.maxVertexInputBindings; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 0; vertex_input_state.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-binding-00618"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_stride_00619) { TEST_DESCRIPTION( "Test VUID-VkVertexInputBindingDescription-stride-00619: stride must be less than or equal to " "VkPhysicalDeviceLimits::maxVertexInputBindingStride"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = 
vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when stride is greater than VkPhysicalDeviceLimits::maxVertexInputBindingStride. VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride + 1; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 0; vertex_input_state.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-stride-00619"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, 
nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_location_00620) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-location-00620: location must be less than " "VkPhysicalDeviceLimits::maxVertexInputAttributes"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when location is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputAttributes. VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.location = m_device->props.limits.maxVertexInputAttributes; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 0; vertex_input_state.pVertexBindingDescriptions = nullptr; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = 
&rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-location-00620"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_binding_00621) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-binding-00621: binding must be less than " "VkPhysicalDeviceLimits::maxVertexInputBindings"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings. VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.binding = m_device->props.limits.maxVertexInputBindings; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 0; vertex_input_state.pVertexBindingDescriptions = nullptr; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; 
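    /* Two errors are expected from the attribute above: its binding equals maxVertexInputBindings, which is out of
       range (VUID-VkVertexInputAttributeDescription-binding-00621), and no element of pVertexBindingDescriptions
       declares that binding (VUID-VkPipelineVertexInputStateCreateInfo-binding-00615). A matching, valid pairing
       would look roughly like this sketch:

           VkVertexInputBindingDescription bind{};
           bind.binding = 0;  bind.stride = 12;  bind.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;

           VkVertexInputAttributeDescription attr{};
           attr.binding = 0;  attr.location = 0;  attr.format = VK_FORMAT_R8G8B8A8_UNORM;  attr.offset = 0;

           // vertexBindingDescriptionCount = 1, pVertexBindingDescriptions = &bind
    */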
rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-binding-00621"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_offset_00622) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-offset-00622: offset must be less than or equal to " "VkPhysicalDeviceLimits::maxVertexInputAttributeOffset"); EnableDeviceProfileLayer(); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); uint32_t maxVertexInputAttributeOffset = 0; { VkPhysicalDeviceProperties device_props = {}; vkGetPhysicalDeviceProperties(gpu(), &device_props); maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset; if (maxVertexInputAttributeOffset == 0xFFFFFFFF) { // Attempt to artificially lower maximum offset PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = (PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT"); if (!fpvkSetPhysicalDeviceLimitsEXT) { printf("%s All offsets are valid & device_profile_api not found; skipped.\n", kSkipPrefix); return; } device_props.limits.maxVertexInputAttributeOffset = device_props.limits.maxVertexInputBindingStride - 2; fpvkSetPhysicalDeviceLimitsEXT(gpu(), &device_props.limits); maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset; } } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.binding = 0; vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride; vertex_input_binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; // Test when offset is greater than maximum. 
VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.format = VK_FORMAT_R8_UNORM; vertex_input_attribute_description.offset = maxVertexInputAttributeOffset + 1; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_TRUE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = nullptr; // no viewport b/c rasterizer is disabled create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-offset-00622"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, NullRenderPass) { // Bind a NULL RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBeginRenderPass: required parameter pRenderPassBegin specified as NULL"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Don't care about RenderPass handle b/c error should be flagged before // that vkCmdBeginRenderPass(m_commandBuffer->handle(), NULL, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, EndCommandBufferWithinRenderPass) { TEST_DESCRIPTION("End a command buffer with an active render pass"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to 
issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); // End command buffer properly to avoid driver issues. This is safe -- the // previous vkEndCommandBuffer should not have reached the driver. m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // TODO: Add test for VK_COMMAND_BUFFER_LEVEL_SECONDARY // TODO: Add test for VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT } TEST_F(VkLayerTest, FillBufferWithinRenderPass) { // Call CmdFillBuffer within an active renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj dstBuffer; dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs); m_commandBuffer->FillBuffer(dstBuffer.handle(), 0, 4, 0x11111111); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, UpdateBufferWithinRenderPass) { // Call CmdUpdateBuffer within an active renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj dstBuffer; dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs); VkDeviceSize dstOffset = 0; uint32_t Data[] = {1, 2, 3, 4, 5, 6, 7, 8}; VkDeviceSize dataSize = sizeof(Data) / sizeof(uint32_t); vkCmdUpdateBuffer(m_commandBuffer->handle(), dstBuffer.handle(), dstOffset, dataSize, &Data); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearColorImageWithBadRange) { TEST_DESCRIPTION("Record clear color with an invalid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const 
VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ClearDepthStencilWithBadRange) { TEST_DESCRIPTION("Record clear depth with an invalid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearDepthStencilValue clear_value = {}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"); const VkImageSubresourceRange range = {ds_aspect, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 1, 1, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 0, 0, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 0, 2, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 0}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 2}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ClearColorImageWithinRenderPass) { // Call CmdClearColorImage within an active RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkClearColorValue clear_color; memset(clear_color.uint32, 0, sizeof(uint32_t) * 4); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image dstImage; dstImage.init(*m_device, (const VkImageCreateInfo &)image_create_info); const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT); vkCmdClearColorImage(m_commandBuffer->handle(), dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &range); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearDepthStencilImageErrors) { // Hit errors related to vkCmdClearDepthStencilImage() // 1. Use an image that doesn't have VK_IMAGE_USAGE_TRANSFER_DST_BIT set // 2. Call CmdClearDepthStencilImage within an active RenderPass ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkClearDepthStencilValue clear_value = {0}; VkMemoryPropertyFlags reqs = 0; VkImageCreateInfo image_create_info = vk_testing::Image::create_info(); image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = depth_format; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Error here is that VK_IMAGE_USAGE_TRANSFER_DST_BIT is excluded for DS image that we'll call Clear on below image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; vk_testing::Image dst_image_bad_usage; dst_image_bad_usage.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-image-00009"); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image_bad_usage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range); m_errorMonitor->VerifyFound(); // Fix usage for next test case image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image dst_image; dst_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-renderpass"); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearColorAttachmentsOutsideRenderPass) { // Call CmdClearAttachmentss outside of an active RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearAttachments(): This call must be issued inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Start no RenderPass m_commandBuffer->begin(); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0; color_attachment.clearValue.color.float32[1] = 0; color_attachment.clearValue.color.float32[2] = 0; color_attachment.clearValue.color.float32[3] = 0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {32, 32}}}; vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, BufferMemoryBarrierNoBuffer) { // Try to add a buffer memory barrier with no buffer. 
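    // (The expected message comes from parameter validation: VkBufferMemoryBarrier::buffer is a required handle,
    // so passing VK_NULL_HANDLE is reported as a missing required parameter.)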
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pBufferMemoryBarriers[0].buffer specified as VK_NULL_HANDLE"); ASSERT_NO_FATAL_FAILURE(Init()); m_commandBuffer->begin(); VkBufferMemoryBarrier buf_barrier = {}; buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; buf_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.buffer = VK_NULL_HANDLE; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidBarriers) { TEST_DESCRIPTION("A variety of ways to get VK_INVALID_BARRIER "); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } // Add a token self-dependency for this test to avoid unexpected errors m_addRenderPassSelfDependency = true; ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Use image unbound to memory in barrier m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindImageMemory()"); vk_testing::Image unbound_image; auto unbound_image_info = vk_testing::Image::create_info(); unbound_image_info.format = VK_FORMAT_B8G8R8A8_UNORM; unbound_image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; unbound_image.init_no_mem(*m_device, unbound_image_info); auto unbound_subresource = vk_testing::Image::subresource_range(unbound_image_info, VK_IMAGE_ASPECT_COLOR_BIT); auto unbound_image_barrier = unbound_image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, unbound_subresource); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &unbound_image_barrier); m_errorMonitor->VerifyFound(); // Use buffer unbound to memory in barrier m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. 
Memory should be bound by calling vkBindBufferMemory()"); VkBufferObj unbound_buffer; auto unbound_buffer_info = VkBufferObj::create_info(16, VK_IMAGE_USAGE_TRANSFER_DST_BIT); unbound_buffer.init_no_mem(*m_device, unbound_buffer_info); auto unbound_buffer_barrier = unbound_buffer.buffer_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, 0, 16); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 1, &unbound_buffer_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-newLayout-01198"); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; // New layout can't be UNDEFINED img_barrier.newLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.image = m_renderTargets[0]->handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Transition image to color attachment optimal img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); // TODO: this looks vestigal or incomplete... 
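    // The duplicate layout-transition cases below rely on VUID-VkImageMemoryBarrier-oldLayout-01197: oldLayout must be
    // either VK_IMAGE_LAYOUT_UNDEFINED or the image's current layout. Repeating an UNDEFINED -> GENERAL barrier is
    // therefore accepted, while repeating a GENERAL -> COLOR_ATTACHMENT_OPTIMAL barrier is flagged, since after the
    // first copy of the barrier the tracked layout no longer matches the duplicate's oldLayout.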
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // Can't send buffer memory barrier during a render pass vkCmdEndRenderPass(m_commandBuffer->handle()); // Duplicate barriers that change layout img_barrier.image = image.handle(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; VkImageMemoryBarrier img_barriers[2] = {img_barrier, img_barrier}; // Transitions from UNDEFINED are valid, even if duplicated m_errorMonitor->ExpectSuccess(); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 2, img_barriers); m_errorMonitor->VerifyNotFound(); // Duplication of layout transitions (not from undefined) are not valid img_barriers[0].oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barriers[0].newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barriers[1].oldLayout = img_barriers[0].oldLayout; img_barriers[1].newLayout = img_barriers[0].newLayout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01197"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 2, img_barriers); m_errorMonitor->VerifyFound(); VkBufferObj buffer; VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; buffer.init_as_src_and_dst(*m_device, 256, mem_reqs); VkBufferMemoryBarrier buf_barrier = {}; buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; buf_barrier.pNext = NULL; buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; buf_barrier.buffer = buffer.handle(); buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-offset-01187"); // Exceed the buffer size buf_barrier.offset = buffer.create_info().size + 1; // Offset greater than total size vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); buf_barrier.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-size-01189"); buf_barrier.size = buffer.create_info().size + 1; // Size greater than total size vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); // Now exercise barrier aspect bit errors, first DS m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207"); VkDepthStencilObj ds_image(m_device); ds_image.Init(m_device, 128, 128, depth_format); ASSERT_TRUE(ds_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = ds_image.handle(); // Not having 
DEPTH or STENCIL set is an error img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Having only one of depth or stencil set for DS image is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207"); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Having anything other than DEPTH and STENCIL is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Now test depth-only VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &format_props); if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { VkDepthStencilObj d_image(m_device); d_image.Init(m_device, 128, 128, VK_FORMAT_D16_UNORM); ASSERT_TRUE(d_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = d_image.handle(); // DEPTH bit must be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // No bits other than DEPTH may be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Now test stencil-only vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &format_props); if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { VkDepthStencilObj s_image(m_device); s_image.Init(m_device, 128, 128, VK_FORMAT_S8_UINT); ASSERT_TRUE(s_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = s_image.handle(); // Use of COLOR aspect on depth image is error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Stencil-only image formats must have the 
VK_IMAGE_ASPECT_STENCIL_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Finally test color VkImageObj c_image(m_device); c_image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(c_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = c_image.handle(); // COLOR bit must be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // No bits other than COLOR may be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // A barrier's new and old VkImageLayout must be compatible with an image's VkImageUsageFlags. 
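    // The block below creates one image per usage bit and then pairs each layout with images created *without* the
    // usage that layout requires: COLOR_ATTACHMENT_OPTIMAL needs COLOR_ATTACHMENT usage (01208), the DEPTH_STENCIL
    // layouts need DEPTH_STENCIL_ATTACHMENT usage (01209/01210), SHADER_READ_ONLY_OPTIMAL needs SAMPLED or
    // INPUT_ATTACHMENT usage (01211), and the TRANSFER layouts need the matching TRANSFER_SRC/TRANSFER_DST usage
    // (01212/01213). Each bad layout is exercised as both oldLayout and newLayout.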
{ VkImageObj img_color(m_device); img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_color.initialized()); VkImageObj img_ds(m_device); img_ds.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds.initialized()); VkImageObj img_xfer_src(m_device); img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_src.initialized()); VkImageObj img_xfer_dst(m_device); img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_dst.initialized()); VkImageObj img_sampled(m_device); img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_sampled.initialized()); VkImageObj img_input(m_device); img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_input.initialized()); const struct { VkImageObj &image_obj; VkImageLayout bad_layout; std::string msg_code; } bad_buffer_layouts[] = { // clang-format off // images _without_ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT {img_ds, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_xfer_src, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_xfer_dst, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_sampled, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_input, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, // images _without_ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT {img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, // images _without_ VK_IMAGE_USAGE_SAMPLED_BIT or VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT {img_color, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_ds, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_xfer_src, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_xfer_dst, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, // images _without_ VK_IMAGE_USAGE_TRANSFER_SRC_BIT {img_color, 
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_ds, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_sampled, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_input, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, // images _without_ VK_IMAGE_USAGE_TRANSFER_DST_BIT {img_color, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_ds, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_sampled, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_input, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, // clang-format on }; const uint32_t layout_count = sizeof(bad_buffer_layouts) / sizeof(bad_buffer_layouts[0]); for (uint32_t i = 0; i < layout_count; ++i) { img_barrier.image = bad_buffer_layouts[i].image_obj.handle(); const VkImageUsageFlags usage = bad_buffer_layouts[i].image_obj.usage(); img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ? (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) : VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.oldLayout = bad_buffer_layouts[i].bad_layout; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = bad_buffer_layouts[i].bad_layout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; } // Attempt barrier where srcAccessMask is not supported by srcStageMask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184"); // Have lower-order bit that's supported (shader write), but higher-order bit not supported to verify multi-bit validation buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); // Attempt barrier where dsAccessMask is not supported by dstStageMask buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
    m_errorMonitor->VerifyFound();

    // Attempt to mismatch barriers/waitEvents calls with incompatible queues
    // Create command pool with incompatible queue flags
    const std::vector<VkQueueFamilyProperties> queue_props = m_device->queue_props;
    uint32_t queue_family_index = m_device->QueueFamilyMatching(VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT);
    if (queue_family_index == UINT32_MAX) {
        printf("%s No non-compute queue supporting graphics found; skipped.\n", kSkipPrefix);
        return;  // NOTE: this exits the test function!
    }

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01183");

    VkCommandPoolObj command_pool(m_device, queue_family_index, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj bad_command_buffer(m_device, &command_pool);

    bad_command_buffer.begin();
    buf_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    // Set two bits that should both be supported as a bonus positive check
    buf_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
    vkCmdPipelineBarrier(bad_command_buffer.handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
    m_errorMonitor->VerifyFound();

    // Check for the error when trying to wait on a pipeline stage not supported by this queue. Specifically, since our queue
    // is not a compute queue, vkCmdWaitEvents cannot have its source stage mask be VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-01164");
    VkEvent event;
    VkEventCreateInfo event_create_info{};
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
    vkCmdWaitEvents(bad_command_buffer.handle(), 1, &event, /*source stage mask*/ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
    m_errorMonitor->VerifyFound();

    bad_command_buffer.end();
    vkDestroyEvent(m_device->device(), event, nullptr);
}

// Helpers for the tests below
static void ValidOwnershipTransferOp(ErrorMonitor *monitor, VkCommandBufferObj *cb, VkPipelineStageFlags src_stages,
                                     VkPipelineStageFlags dst_stages, const VkBufferMemoryBarrier *buf_barrier,
                                     const VkImageMemoryBarrier *img_barrier) {
    monitor->ExpectSuccess();
    cb->begin();
    uint32_t num_buf_barrier = (buf_barrier) ? 1 : 0;
    uint32_t num_img_barrier = (img_barrier) ?
1 : 0; cb->PipelineBarrier(src_stages, dst_stages, 0, 0, nullptr, num_buf_barrier, buf_barrier, num_img_barrier, img_barrier); cb->end(); cb->QueueCommandBuffer(); // Implicitly waits monitor->VerifyNotFound(); } static void ValidOwnershipTransfer(ErrorMonitor *monitor, VkCommandBufferObj *cb_from, VkCommandBufferObj *cb_to, VkPipelineStageFlags src_stages, VkPipelineStageFlags dst_stages, const VkBufferMemoryBarrier *buf_barrier, const VkImageMemoryBarrier *img_barrier) { ValidOwnershipTransferOp(monitor, cb_from, src_stages, dst_stages, buf_barrier, img_barrier); ValidOwnershipTransferOp(monitor, cb_to, src_stages, dst_stages, buf_barrier, img_barrier); } TEST_F(VkPositiveLayerTest, OwnershipTranfersImage) { TEST_DESCRIPTION("Valid image ownership transfers that shouldn't create errors"); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT); if (no_gfx == UINT32_MAX) { printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix); return; } VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get(); VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue); // Create an "exclusive" image owned by the graphics queue. VkImageObj image(m_device); VkFlags image_use = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, image_use, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); auto image_subres = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1); auto image_barrier = image.image_memory_barrier(0, 0, image.Layout(), image.Layout(), image_subres); image_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_; image_barrier.dstQueueFamilyIndex = no_gfx; ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, nullptr, &image_barrier); // Change layouts while changing ownership image_barrier.srcQueueFamilyIndex = no_gfx; image_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_; image_barrier.oldLayout = image.Layout(); // Make sure the new layout is different from the old if (image_barrier.oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } else { image_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; } ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, nullptr, &image_barrier); } TEST_F(VkPositiveLayerTest, OwnershipTranfersBuffer) { TEST_DESCRIPTION("Valid buffer ownership transfers that shouldn't create errors"); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT); if (no_gfx == UINT32_MAX) { printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix); return; } VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get(); VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 
                                no_gfx_queue);

    // Create a buffer
    const VkDeviceSize buffer_size = 256;
    uint8_t data[buffer_size] = {0xFF};
    VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);
    ASSERT_TRUE(buffer.initialized());
    auto buffer_barrier = buffer.buffer_memory_barrier(0, 0, 0, VK_WHOLE_SIZE);

    // Let gfx own it.
    buffer_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
    buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
    ValidOwnershipTransferOp(m_errorMonitor, m_commandBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                             &buffer_barrier, nullptr);

    // Transfer it to non-gfx
    buffer_barrier.dstQueueFamilyIndex = no_gfx;
    ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                           VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr);

    // Transfer it to gfx
    buffer_barrier.srcQueueFamilyIndex = no_gfx;
    buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
    ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, &buffer_barrier, nullptr);
}

class BarrierQueueFamilyTestHelper {
  public:
    struct QueueFamilyObjs {
        uint32_t index;
        // We would use std::unique_ptr, but this triggers a compiler error on older compilers
        VkQueueObj *queue = nullptr;
        VkCommandPoolObj *command_pool = nullptr;
        VkCommandBufferObj *command_buffer = nullptr;
        VkCommandBufferObj *command_buffer2 = nullptr;
        ~QueueFamilyObjs() {
            delete command_buffer2;
            delete command_buffer;
            delete command_pool;
            delete queue;
        }

        void Init(VkDeviceObj *device, uint32_t qf_index, VkQueue qf_queue, VkCommandPoolCreateFlags cp_flags) {
            index = qf_index;
            queue = new VkQueueObj(qf_queue, qf_index);
            command_pool = new VkCommandPoolObj(device, qf_index, cp_flags);
            command_buffer = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue);
            command_buffer2 = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue);
        };
    };

    struct Context {
        VkLayerTest *layer_test;
        uint32_t default_index;
        std::unordered_map<uint32_t, QueueFamilyObjs> queue_families;
        Context(VkLayerTest *test, const std::vector<uint32_t> &queue_family_indices) : layer_test(test) {
            if (0 == queue_family_indices.size()) {
                return;  // This is invalid
            }
            VkDeviceObj *device_obj = layer_test->DeviceObj();
            queue_families.reserve(queue_family_indices.size());
            default_index = queue_family_indices[0];
            for (auto qfi : queue_family_indices) {
                VkQueue queue = device_obj->queue_family_queues(qfi)[0]->handle();
                queue_families.emplace(std::make_pair(qfi, QueueFamilyObjs()));
                queue_families[qfi].Init(device_obj, qfi, queue, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
            }
            Reset();
        }

        void Reset() {
            layer_test->DeviceObj()->wait();
            for (auto &qf : queue_families) {
                vkResetCommandPool(layer_test->device(), qf.second.command_pool->handle(), 0);
            }
        }
    };

    BarrierQueueFamilyTestHelper(Context *context) : context_(context), image_(context->layer_test->DeviceObj()) {}

    // Init with queue families non-null for CONCURRENT sharing mode (which requires them)
    void Init(std::vector<uint32_t> *families) {
        VkDeviceObj *device_obj = context_->layer_test->DeviceObj();
        image_.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0, families);
        ASSERT_TRUE(image_.initialized());

        image_barrier_ = image_.image_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, image_.Layout(),
                                                     image_.Layout(),
                                                     image_.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1));

        VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        buffer_.init_as_src_and_dst(*device_obj, 256, mem_prop, families);
        ASSERT_TRUE(buffer_.initialized());
        buffer_barrier_ = buffer_.buffer_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0, VK_WHOLE_SIZE);
    }

    QueueFamilyObjs *GetQueueFamilyInfo(Context *context, uint32_t qfi) {
        QueueFamilyObjs *qf;

        auto qf_it = context->queue_families.find(qfi);
        if (qf_it != context->queue_families.end()) {
            qf = &(qf_it->second);
        } else {
            qf = &(context->queue_families[context->default_index]);
        }
        return qf;
    }

    enum Modifier {
        NONE,
        DOUBLE_RECORD,
        DOUBLE_COMMAND_BUFFER,
    };

    void operator()(std::string img_err, std::string buf_err, uint32_t src, uint32_t dst, bool positive = false,
                    uint32_t queue_family_index = kInvalidQueueFamily, Modifier mod = Modifier::NONE) {
        auto monitor = context_->layer_test->Monitor();
        monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, img_err);
        monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, buf_err);

        image_barrier_.srcQueueFamilyIndex = src;
        image_barrier_.dstQueueFamilyIndex = dst;
        buffer_barrier_.srcQueueFamilyIndex = src;
        buffer_barrier_.dstQueueFamilyIndex = dst;

        QueueFamilyObjs *qf = GetQueueFamilyInfo(context_, queue_family_index);
        VkCommandBufferObj *command_buffer = qf->command_buffer;
        for (int cb_repeat = 0; cb_repeat < (mod == Modifier::DOUBLE_COMMAND_BUFFER ? 2 : 1); cb_repeat++) {
            command_buffer->begin();
            for (int repeat = 0; repeat < (mod == Modifier::DOUBLE_RECORD ? 2 : 1); repeat++) {
                vkCmdPipelineBarrier(command_buffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buffer_barrier_, 1, &image_barrier_);
            }
            command_buffer->end();
            command_buffer = qf->command_buffer2;  // Second pass (if any) goes to the second command_buffer.
        }

        if (queue_family_index != kInvalidQueueFamily) {
            if (mod == Modifier::DOUBLE_COMMAND_BUFFER) {
                // the Fence resolves to VK_NULL_HANDLE... i.e. no fence
                qf->queue->submit({{qf->command_buffer, qf->command_buffer2}}, vk_testing::Fence(), positive);
            } else {
                qf->command_buffer->QueueCommandBuffer(positive);  // Check for success on positive tests only
            }
        }

        if (positive) {
            monitor->VerifyNotFound();
        } else {
            monitor->VerifyFound();
        }
        context_->Reset();
    };

  protected:
    static const uint32_t kInvalidQueueFamily = UINT32_MAX;
    Context *context_;
    VkImageObj image_;
    VkImageMemoryBarrier image_barrier_;
    VkBufferObj buffer_;
    VkBufferMemoryBarrier buffer_barrier_;
};

TEST_F(VkLayerTest, InvalidBarrierQueueFamily) {
    TEST_DESCRIPTION("Create and submit barriers with invalid queue families");
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Find queues of two families
    const uint32_t submit_family = m_device->graphics_queue_node_index_;
    const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
    const uint32_t other_family = submit_family != 0 ?
                                                      0 : 1;
    const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0);

    std::vector<uint32_t> qf_indices{{submit_family, other_family}};
    if (only_one_family) {
        qf_indices.resize(1);
    }
    BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices);

    if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
        printf(
            "%s Device has apiVersion greater than 1.0 -- skipping test cases that require external memory "
            "to be "
            "disabled.\n",
            kSkipPrefix);
    } else {
        if (only_one_family) {
            printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix);
        } else {
            std::vector<uint32_t> families = {submit_family, other_family};
            BarrierQueueFamilyTestHelper conc_test(&test_context);
            conc_test.Init(&families);
            // core_validation::barrier_queue_families::kSrcAndDestMustBeIgnore
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED,
                      submit_family);
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family,
                      VK_QUEUE_FAMILY_IGNORED);
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family,
                      submit_family);
            // true -> positive test
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED,
                      VK_QUEUE_FAMILY_IGNORED, true);
        }

        BarrierQueueFamilyTestHelper excl_test(&test_context);
        excl_test.Init(nullptr);  // no queue families means *exclusive* sharing mode.

        // core_validation::barrier_queue_families::kBothIgnoreOrBothValid
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED,
                  submit_family);
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family,
                  VK_QUEUE_FAMILY_IGNORED);
        // true -> positive test
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family, submit_family,
                  true);
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED,
                  VK_QUEUE_FAMILY_IGNORED, true);
    }

    if (only_one_family) {
        printf("%s Single queue family found -- VK_SHARING_MODE_EXCLUSIVE submit testcases skipped.\n", kSkipPrefix);
    } else {
        BarrierQueueFamilyTestHelper excl_test(&test_context);
        excl_test.Init(nullptr);

        // core_validation::barrier_queue_families::kSubmitQueueMustMatchSrcOrDst
        excl_test("VUID-VkImageMemoryBarrier-image-01205", "VUID-VkBufferMemoryBarrier-buffer-01196", other_family, other_family,
                  false, submit_family);

        // true -> positive test (testing both the index logic and the QFO transfer tracking)
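        // For EXCLUSIVE-mode resources a queue family ownership (QFO) transfer is only complete when the same barrier is
        // recorded and submitted twice: once on a queue of the releasing (source) family and once on a queue of the
        // acquiring (destination) family. The positive cases below do exactly that, and the UNASSIGNED-* cases flag
        // duplicate releases/acquires and acquires with no pending release. A minimal sketch of the pattern, using
        // hypothetical app_image/src_qfi/dst_qfi handles and release_cb/acquire_cb command buffers supplied by the
        // caller (kept in #if 0 so it is illustrative only, mirroring what ValidOwnershipTransfer() above submits):
#if 0
        VkImageMemoryBarrier qfot_barrier = {};
        qfot_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        qfot_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        qfot_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        qfot_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
        qfot_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
        qfot_barrier.srcQueueFamilyIndex = src_qfi;  // releasing family
        qfot_barrier.dstQueueFamilyIndex = dst_qfi;  // acquiring family
        qfot_barrier.image = app_image;
        qfot_barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
        vkCmdPipelineBarrier(release_cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
                             nullptr, 1, &qfot_barrier);  // recorded on the source family (release)
        vkCmdPipelineBarrier(acquire_cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
                             nullptr, 1, &qfot_barrier);  // recorded on the destination family (acquire)
#endif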
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, other_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, other_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, submit_family);

        // negative testing for QFO transfer tracking
        // Duplicate release in one CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family,
                  other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
        // Duplicate pending release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00003", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00003", submit_family,
                  other_family, false, submit_family);
        // Duplicate acquire in one CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family,
                  other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
        // No pending release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00004", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00004", submit_family,
                  other_family, false, other_family);
        // Duplicate release in two CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family,
                  other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
        // Duplicate acquire in two CB
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family);  // need a successful release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family,
                  other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
    }
}

TEST_F(VkLayerTest, InvalidBarrierQueueFamilyWithMemExt) {
    TEST_DESCRIPTION("Create and submit barriers with invalid queue families when memory extension is enabled ");
    std::vector<const char *> reqd_instance_extensions = {
        {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}};
    for (auto extension_name : reqd_instance_extensions) {
        if (InstanceExtensionSupported(extension_name)) {
            m_instance_extension_names.push_back(extension_name);
        } else {
            printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name);
            return;
        }
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Check for external memory device extensions
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
    } else {
        printf("%s External memory extension not supported, skipping test\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Find queues of two families
    const uint32_t submit_family = m_device->graphics_queue_node_index_;
    const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
    const uint32_t other_family = submit_family != 0 ?
        0 : 1;
    const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0);

    std::vector<uint32_t> qf_indices{{submit_family, other_family}};
    if (only_one_family) {
        qf_indices.resize(1);
    }
    BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices);

    if (only_one_family) {
        printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix);
    } else {
        std::vector<uint32_t> families = {submit_family, other_family};
        BarrierQueueFamilyTestHelper conc_test(&test_context);

        // core_validation::barrier_queue_families::kSrcOrDstMustBeIgnore
        conc_test.Init(&families);
        conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", submit_family,
                  submit_family);
        // true -> positive test
        conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED,
                  VK_QUEUE_FAMILY_IGNORED, true);
        conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED,
                  VK_QUEUE_FAMILY_EXTERNAL_KHR, true);
        conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_EXTERNAL_KHR,
                  VK_QUEUE_FAMILY_IGNORED, true);

        // core_validation::barrier_queue_families::kSpecialOrIgnoreOnly
        conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", submit_family,
                  VK_QUEUE_FAMILY_IGNORED);
        conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED,
                  submit_family);
        // This is to flag the errors that would be considered only "unexpected" in the parallel case above
        // true -> positive test
        conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED,
                  VK_QUEUE_FAMILY_EXTERNAL_KHR, true);
        conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_EXTERNAL_KHR,
                  VK_QUEUE_FAMILY_IGNORED, true);
    }

    BarrierQueueFamilyTestHelper excl_test(&test_context);
    excl_test.Init(nullptr);  // no queue families means *exclusive* sharing mode.
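    // With VK_KHR_external_memory enabled, VK_QUEUE_FAMILY_EXTERNAL_KHR is accepted as a "special" queue family value in
    // exclusive-mode barriers, which is why several of the cases below that use it are expected to pass (positive tests).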
    // core_validation::barrier_queue_families::kSrcIgnoreRequiresDstIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
              submit_family);
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
              VK_QUEUE_FAMILY_EXTERNAL_KHR);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
              VK_QUEUE_FAMILY_IGNORED, true);

    // core_validation::barrier_queue_families::kDstValidOrSpecialIfNotIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, invalid);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, submit_family,
              true);
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family,
              VK_QUEUE_FAMILY_IGNORED, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family,
              VK_QUEUE_FAMILY_EXTERNAL_KHR, true);

    // core_validation::barrier_queue_families::kSrcValidOrSpecialIfNotIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", invalid, submit_family);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", submit_family, submit_family,
              true);
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_IGNORED,
              VK_QUEUE_FAMILY_IGNORED, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_EXTERNAL_KHR,
              submit_family, true);
}

TEST_F(VkLayerTest, ImageBarrierWithBadRange) {
    TEST_DESCRIPTION("VkImageMemoryBarrier with an invalid subresourceRange");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkImageObj image(m_device);
    image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(image.create_info().arrayLayers == 1);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier img_barrier_template = {};
    img_barrier_template.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier_template.pNext = NULL;
    img_barrier_template.srcAccessMask = 0;
    img_barrier_template.dstAccessMask = 0;
    img_barrier_template.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    img_barrier_template.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier_template.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier_template.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier_template.image = image.handle();
    // subresourceRange to be set later for the purposes of this test
    img_barrier_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier_template.subresourceRange.baseArrayLayer = 0;
    img_barrier_template.subresourceRange.baseMipLevel = 0;
    img_barrier_template.subresourceRange.layerCount = 0;
    img_barrier_template.subresourceRange.levelCount = 0;

    m_commandBuffer->begin();

    // Nested scope here confuses clang-format, somehow
    // clang-format off

    // try for vkCmdPipelineBarrier
    {
        // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
        {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486");
            const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT,
1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = 
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } } // try for vkCmdWaitEvents { VkEvent event; VkEventCreateInfo eci{VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, NULL, 0}; VkResult err = vkCreateEvent(m_device->handle(), &eci, nullptr, &event); ASSERT_VK_SUCCESS(err); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } vkDestroyEvent(m_device->handle(), event, nullptr); } // clang-format on } TEST_F(VkLayerTest, ValidationCacheTestBadMerge) { ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_VALIDATION_CACHE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Load extension functions auto fpCreateValidationCache = (PFN_vkCreateValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkCreateValidationCacheEXT"); auto fpDestroyValidationCache = (PFN_vkDestroyValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkDestroyValidationCacheEXT"); auto fpMergeValidationCaches = (PFN_vkMergeValidationCachesEXT)vkGetDeviceProcAddr(m_device->device(), "vkMergeValidationCachesEXT"); if (!fpCreateValidationCache || !fpDestroyValidationCache || !fpMergeValidationCaches) { printf("%s Failed to load function pointers for %s\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); return; } VkValidationCacheCreateInfoEXT validationCacheCreateInfo; 
    validationCacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
    validationCacheCreateInfo.pNext = NULL;
    validationCacheCreateInfo.initialDataSize = 0;
    validationCacheCreateInfo.pInitialData = NULL;
    validationCacheCreateInfo.flags = 0;
    VkValidationCacheEXT validationCache = VK_NULL_HANDLE;
    VkResult res = fpCreateValidationCache(m_device->device(), &validationCacheCreateInfo, nullptr, &validationCache);
    ASSERT_VK_SUCCESS(res);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMergeValidationCachesEXT-dstCache-01536");
    res = fpMergeValidationCaches(m_device->device(), validationCache, 1, &validationCache);
    m_errorMonitor->VerifyFound();

    fpDestroyValidationCache(m_device->device(), validationCache, nullptr);
}

TEST_F(VkPositiveLayerTest, LayoutFromPresentWithoutAccessMemoryRead) {
    // Transition an image away from PRESENT_SRC_KHR without ACCESS_MEMORY_READ
    // in srcAccessMask.

    // The required behavior here was a bit unclear in earlier versions of the
    // spec, but there is no memory dependency required here, so this should
    // work without warnings.

    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
               VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier barrier = {};
    VkImageSubresourceRange range;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask = 0;
    barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    barrier.image = image.handle();
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseMipLevel = 0;
    range.levelCount = 1;
    range.baseArrayLayer = 0;
    range.layerCount = 1;
    barrier.subresourceRange = range;
    VkCommandBufferObj cmdbuf(m_device, m_commandPool);
    cmdbuf.begin();
    cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
                           &barrier);
    barrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
                           &barrier);

    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkLayerTest, IdxBufferAlignmentError) {
    // Introduce failure by binding an index buffer at an offset that is not a multiple of the index type size
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    uint32_t const indices[] = {0};
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.size = 1024;
    buf_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
    buf_info.queueFamilyIndexCount = 1;
    buf_info.pQueueFamilyIndices = indices;

    VkBuffer buffer;
    VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    VkMemoryRequirements requirements;
    vkGetBufferMemoryRequirements(m_device->device(), buffer, &requirements);

    VkMemoryAllocateInfo alloc_info{};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = requirements.size;
    bool pass = m_device->phy().set_memory_type(requirements.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    ASSERT_TRUE(pass);

    VkDeviceMemory memory;
    err =
vkAllocateMemory(m_device->device(), &alloc_info, NULL, &memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, memory, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); ASSERT_VK_SUCCESS(err); // vkCmdBindPipeline(m_commandBuffer->handle(), // VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Should error before calling to driver so don't care about actual data m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBindIndexBuffer() offset (0x7) does not fall on "); vkCmdBindIndexBuffer(m_commandBuffer->handle(), buffer, 7, VK_INDEX_TYPE_UINT16); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), memory, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, InvalidQueueFamilyIndex) { // Miscellaneous queueFamilyIndex validation tests ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buffCI.queueFamilyIndexCount = 2; // Introduce failure by specifying invalid queue_family_index uint32_t qfi[2]; qfi[0] = 777; qfi[1] = 0; buffCI.pQueueFamilyIndices = qfi; buffCI.sharingMode = VK_SHARING_MODE_CONCURRENT; // qfi only matters in CONCURRENT mode VkBuffer ib; // Test for queue family index out of range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-sharingMode-01419"); vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib); m_errorMonitor->VerifyFound(); // Test for non-unique QFI in array qfi[0] = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-sharingMode-01419"); vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib); m_errorMonitor->VerifyFound(); if (m_device->queue_props.size() > 2) { VkBuffer ib2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which was not created allowing concurrent"); // Create buffer shared to queue families 1 and 2, but submitted on queue family 0 buffCI.queueFamilyIndexCount = 2; qfi[0] = 1; qfi[1] = 2; vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib2); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), ib2, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; bool pass = false; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate required memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), ib2, NULL); return; } vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); vkBindBufferMemory(m_device->device(), ib2, mem, 0); m_commandBuffer->begin(); vkCmdFillBuffer(m_commandBuffer->handle(), ib2, 0, 16, 5); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), ib2, NULL); vkFreeMemory(m_device->device(), mem, NULL); } } TEST_F(VkLayerTest, ExecuteCommandsPrimaryCB) { TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a primary command buffer (should only be secondary)"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // An empty primary command buffer VkCommandBufferObj cb(m_device, m_commandPool); cb.begin(); cb.end(); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), 
                         &renderPassBeginInfo(), VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);

    VkCommandBuffer handle = cb.handle();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdExecuteCommands() called w/ Primary Cmd Buffer ");
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &handle);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetUnexpectedError("All elements of pCommandBuffers must not be in the pending state");
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, DSUsageBitsErrors) {
    TEST_DESCRIPTION("Attempt to update descriptor sets for images and buffers that do not have correct usage bits set.");

    ASSERT_NO_FATAL_FAILURE(Init());

    std::array<VkDescriptorPoolSize, VK_DESCRIPTOR_TYPE_RANGE_SIZE> ds_type_count;
    for (uint32_t i = 0; i < ds_type_count.size(); ++i) {
        ds_type_count[i].type = VkDescriptorType(i);
        ds_type_count[i].descriptorCount = 1;
    }

    vk_testing::DescriptorPool ds_pool;
    ds_pool.init(*m_device, vk_testing::DescriptorPool::create_info(0, VK_DESCRIPTOR_TYPE_RANGE_SIZE, ds_type_count));
    ASSERT_TRUE(ds_pool.initialized());

    std::vector<VkDescriptorSetLayoutBinding> dsl_bindings(1);
    dsl_bindings[0].binding = 0;
    dsl_bindings[0].descriptorType = VkDescriptorType(0);
    dsl_bindings[0].descriptorCount = 1;
    dsl_bindings[0].stageFlags = VK_SHADER_STAGE_ALL;
    dsl_bindings[0].pImmutableSamplers = NULL;

    // Create arrays of layout and descriptor objects
    using UpDescriptorSet = std::unique_ptr<vk_testing::DescriptorSet>;
    std::vector<UpDescriptorSet> descriptor_sets;
    using UpDescriptorSetLayout = std::unique_ptr<VkDescriptorSetLayoutObj>;
    std::vector<UpDescriptorSetLayout> ds_layouts;
    descriptor_sets.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE);
    ds_layouts.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE);
    for (uint32_t i = 0; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) {
        dsl_bindings[0].descriptorType = VkDescriptorType(i);
        ds_layouts.push_back(UpDescriptorSetLayout(new VkDescriptorSetLayoutObj(m_device, dsl_bindings)));
        descriptor_sets.push_back(UpDescriptorSet(ds_pool.alloc_sets(*m_device, *ds_layouts.back())));
        ASSERT_TRUE(descriptor_sets.back()->initialized());
    }

    // Create a buffer & bufferView to be used for invalid updates
    const VkDeviceSize buffer_size = 256;
    uint8_t data[buffer_size];
    VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);
    VkConstantBufferObj storage_texel_buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT);
    ASSERT_TRUE(buffer.initialized() && storage_texel_buffer.initialized());

    auto buff_view_ci = vk_testing::BufferView::createInfo(buffer.handle(), VK_FORMAT_R8_UNORM);
    vk_testing::BufferView buffer_view_obj, storage_texel_buffer_view_obj;
    buffer_view_obj.init(*m_device, buff_view_ci);
    buff_view_ci.buffer = storage_texel_buffer.handle();
    storage_texel_buffer_view_obj.init(*m_device, buff_view_ci);
    ASSERT_TRUE(buffer_view_obj.initialized() && storage_texel_buffer_view_obj.initialized());
    VkBufferView buffer_view = buffer_view_obj.handle();
    VkBufferView storage_texel_buffer_view = storage_texel_buffer_view_obj.handle();

    // Create an image to be used for invalid updates
    VkImageObj image_obj(m_device);
    image_obj.InitNoLayout(64, 64, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image_obj.initialized());
    VkImageView image_view = image_obj.targetView(VK_FORMAT_R8G8B8A8_UNORM);

    VkDescriptorBufferInfo buff_info = {};
    buff_info.buffer = buffer.handle();
    VkDescriptorImageInfo img_info = {};
    img_info.imageView = image_view;
    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = &buffer_view; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = &img_info; // These error messages align with VkDescriptorType struct std::string error_codes[] = { "VUID-VkWriteDescriptorSet-descriptorType-00326", // placeholder, no error for SAMPLER descriptor "VUID-VkWriteDescriptorSet-descriptorType-00326", // COMBINED_IMAGE_SAMPLER "VUID-VkWriteDescriptorSet-descriptorType-00326", // SAMPLED_IMAGE "VUID-VkWriteDescriptorSet-descriptorType-00326", // STORAGE_IMAGE "VUID-VkWriteDescriptorSet-descriptorType-00334", // UNIFORM_TEXEL_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00335", // STORAGE_TEXEL_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER "VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER_DYNAMIC "VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER_DYNAMIC "VUID-VkWriteDescriptorSet-descriptorType-00326" // INPUT_ATTACHMENT }; // Start loop at 1 as SAMPLER desc type has no usage bit error for (uint32_t i = 1; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) { if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { // Now check for UNIFORM_TEXEL_BUFFER using storage_texel_buffer_view descriptor_write.pTexelBufferView = &storage_texel_buffer_view; } descriptor_write.descriptorType = VkDescriptorType(i); descriptor_write.dstSet = descriptor_sets[i]->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_codes[i]); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { descriptor_write.pTexelBufferView = &buffer_view; } } } TEST_F(VkLayerTest, DSBufferInfoErrors) { TEST_DESCRIPTION( "Attempt to update buffer descriptor set that has incorrect parameters in VkDescriptorBufferInfo struct. This includes:\n" "1. offset value greater than or equal to buffer size\n" "2. range value of 0\n" "3. 
        "range value greater than buffer (size - offset)");
    VkResult err;

    // GPDP2 needed for push descriptors support below
    bool gpdp2_support = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                                    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
    if (gpdp2_support) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    bool update_template_support = DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME);
    if (update_template_support) {
        m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME);
    } else {
        printf("%s Descriptor Update Template Extensions not supported, template cases skipped.\n", kSkipPrefix);
    }

    // Note: Includes workaround for some implementations which incorrectly return 0 maxPushDescriptors
    bool push_descriptor_support = gpdp2_support && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) &&
                                   (GetPushDescriptorProperties(instance(), gpu()).maxPushDescriptors > 0);
    if (push_descriptor_support) {
        m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
    } else {
        printf("%s Push Descriptor Extension not supported, push descriptor cases skipped.\n", kSkipPrefix);
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    std::vector<VkDescriptorSetLayoutBinding> ds_bindings = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    OneOffDescriptorSet ds(m_device, ds_bindings);

    // Create a buffer to be used for invalid updates
    VkBufferCreateInfo buff_ci = {};
    buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buff_ci.size = m_device->props.limits.minUniformBufferOffsetAlignment;
    buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkBuffer buffer;
    err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    // Have to bind memory to buffer before descriptor update
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);

    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = mem_reqs.size;
    mem_alloc.memoryTypeIndex = 0;
    bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    if (!pass) {
        printf("%s Failed to allocate memory.\n", kSkipPrefix);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }

    VkDeviceMemory mem;
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
    ASSERT_VK_SUCCESS(err);
    err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorBufferInfo buff_info = {};
    buff_info.buffer = buffer;
    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.pTexelBufferView = nullptr;
    descriptor_write.pBufferInfo = &buff_info;
    descriptor_write.pImageInfo = nullptr;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_write.dstSet = ds.set_;

    // Relying on the "return nullptr for non-enabled extensions" behavior
    auto vkCreateDescriptorUpdateTemplateKHR =
        (PFN_vkCreateDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR");
    auto vkDestroyDescriptorUpdateTemplateKHR =
        (PFN_vkDestroyDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR");
    auto vkUpdateDescriptorSetWithTemplateKHR =
        (PFN_vkUpdateDescriptorSetWithTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkUpdateDescriptorSetWithTemplateKHR");

    if (update_template_support) {
        ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr);
        ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr);
        ASSERT_NE(vkUpdateDescriptorSetWithTemplateKHR, nullptr);
    }

    // Setup for update w/ template tests
    // Create a template of descriptor set updates
    struct SimpleTemplateData {
        uint8_t padding[7];
        VkDescriptorBufferInfo buff_info;
        uint32_t other_padding[4];
    };
    SimpleTemplateData update_template_data = {};

    VkDescriptorUpdateTemplateEntry update_template_entry = {};
    update_template_entry.dstBinding = 0;
    update_template_entry.dstArrayElement = 0;
    update_template_entry.descriptorCount = 1;
    update_template_entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    update_template_entry.offset = offsetof(SimpleTemplateData, buff_info);
    update_template_entry.stride = sizeof(SimpleTemplateData);

    auto update_template_ci = lvl_init_struct<VkDescriptorUpdateTemplateCreateInfoKHR>();
    update_template_ci.descriptorUpdateEntryCount = 1;
    update_template_ci.pDescriptorUpdateEntries = &update_template_entry;
    update_template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET;
    update_template_ci.descriptorSetLayout = ds.layout_.handle();

    VkDescriptorUpdateTemplate update_template = VK_NULL_HANDLE;
    if (update_template_support) {
        auto result = vkCreateDescriptorUpdateTemplateKHR(m_device->device(), &update_template_ci, nullptr, &update_template);
        ASSERT_VK_SUCCESS(result);
    }

    // VK_KHR_push_descriptor support
    auto vkCmdPushDescriptorSetKHR =
        (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR");
    auto vkCmdPushDescriptorSetWithTemplateKHR =
        (PFN_vkCmdPushDescriptorSetWithTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetWithTemplateKHR");

    std::unique_ptr<VkDescriptorSetLayoutObj> push_dsl = nullptr;
    std::unique_ptr<VkPipelineLayoutObj> pipeline_layout = nullptr;
    VkDescriptorUpdateTemplate push_template = VK_NULL_HANDLE;
    if (push_descriptor_support) {
        ASSERT_NE(vkCmdPushDescriptorSetKHR, nullptr);
        push_dsl.reset(
            new VkDescriptorSetLayoutObj(m_device, ds_bindings, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
        pipeline_layout.reset(new VkPipelineLayoutObj(m_device, {push_dsl.get()}));
        ASSERT_TRUE(push_dsl->initialized());

        if (update_template_support) {
            ASSERT_NE(vkCmdPushDescriptorSetWithTemplateKHR, nullptr);
            auto push_template_ci = lvl_init_struct<VkDescriptorUpdateTemplateCreateInfoKHR>();
            push_template_ci.descriptorUpdateEntryCount = 1;
            push_template_ci.pDescriptorUpdateEntries = &update_template_entry;
            push_template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR;
            push_template_ci.descriptorSetLayout = VK_NULL_HANDLE;
            push_template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
            push_template_ci.pipelineLayout = pipeline_layout->handle();
            push_template_ci.set = 0;
            auto result = vkCreateDescriptorUpdateTemplateKHR(m_device->device(), &push_template_ci, nullptr, &push_template);
            ASSERT_VK_SUCCESS(result);
        }
    }

    auto do_test = [&](const char *desired_failure) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure);
        vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
        m_errorMonitor->VerifyFound();

        if (push_descriptor_support) {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure);
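            // Push the same (invalid) VkDescriptorBufferInfo through vkCmdPushDescriptorSetKHR; the push path is
            // expected to trigger the same VUID as the vkUpdateDescriptorSets path above.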
m_commandBuffer->begin(); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout->handle(), 0, 1, &descriptor_write); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } if (update_template_support) { update_template_data.buff_info = buff_info; // copy the test case information into our "pData" m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); vkUpdateDescriptorSetWithTemplateKHR(m_device->device(), ds.set_, update_template, &update_template_data); m_errorMonitor->VerifyFound(); if (push_descriptor_support) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, desired_failure); m_commandBuffer->begin(); vkCmdPushDescriptorSetWithTemplateKHR(m_commandBuffer->handle(), push_template, pipeline_layout->handle(), 0, &update_template_data); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } } }; // Cause error due to offset out of range buff_info.offset = buff_ci.size; buff_info.range = VK_WHOLE_SIZE; do_test("VUID-VkDescriptorBufferInfo-offset-00340"); // Now cause error due to range of 0 buff_info.offset = 0; buff_info.range = 0; do_test("VUID-VkDescriptorBufferInfo-range-00341"); // Now cause error due to range exceeding buffer size - offset buff_info.offset = 0; buff_info.range = buff_ci.size + 1; do_test("VUID-VkDescriptorBufferInfo-range-00342"); if (update_template_support) { vkDestroyDescriptorUpdateTemplateKHR(m_device->device(), update_template, nullptr); if (push_descriptor_support) { vkDestroyDescriptorUpdateTemplateKHR(m_device->device(), push_template, nullptr); } } vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, DSBufferLimitErrors) { TEST_DESCRIPTION( "Attempt to update buffer descriptor set that has VkDescriptorBufferInfo values that violate device limits.\n" "Test cases include:\n" "1. range of uniform buffer update exceeds maxUniformBufferRange\n" "2. offset of uniform buffer update is not multiple of minUniformBufferOffsetAlignment\n" "3. using VK_WHOLE_SIZE with uniform buffer size exceeding maxUniformBufferRange\n" "4. range of storage buffer update exceeds maxStorageBufferRange\n" "5. offset of storage buffer update is not multiple of minStorageBufferOffsetAlignment\n" "6. 
using VK_WHOLE_SIZE with storage buffer size exceeding maxStorageBufferRange"); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); struct TestCase { VkDescriptorType descriptor_type; VkBufferUsageFlagBits buffer_usage; VkDeviceSize max_range; std::string max_range_vu; VkDeviceSize min_align; std::string min_align_vu; }; for (const auto &test_case : { TestCase({VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, m_device->props.limits.maxUniformBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00332", m_device->props.limits.minUniformBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00327"}), TestCase({VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, m_device->props.limits.maxStorageBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00333", m_device->props.limits.minStorageBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00328"}), }) { // Create layout with single buffer OneOffDescriptorSet ds(m_device, { {0, test_case.descriptor_type, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for invalid updates VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = test_case.buffer_usage; bci.size = test_case.max_range + test_case.min_align; // Make buffer bigger than range limit bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &bci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = mem_reqs.size; bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); continue; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); if (VK_SUCCESS != err) { printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); continue; } err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = test_case.descriptor_type; descriptor_write.dstSet = ds.set_; // Exceed range limit if (test_case.max_range != UINT32_MAX) { buff_info.range = test_case.max_range + 1; buff_info.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } // Reduce size of range to acceptable limit and cause offset error if (test_case.min_align > 1) { buff_info.range = test_case.max_range; buff_info.offset = test_case.min_align - 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.min_align_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); 
m_errorMonitor->VerifyFound(); } // Exceed effective range limit by using VK_WHOLE_SIZE buff_info.range = VK_WHOLE_SIZE; buff_info.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } } TEST_F(VkLayerTest, DSAspectBitsErrors) { // TODO : Initially only catching case where DEPTH & STENCIL aspect bits // are set, but could expand this test to hit more cases. TEST_DESCRIPTION("Attempt to update descriptor sets for images that do not have correct aspect bits sets."); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create an image to be used for invalid updates VkImageObj image_obj(m_device); image_obj.Init(64, 64, 1, depth_format, VK_IMAGE_USAGE_SAMPLED_BIT); if (!image_obj.initialized()) { printf("%s Depth + Stencil format cannot be sampled. Skipped.\n", kSkipPrefix); return; } VkImage image = image_obj.image(); // Now create view for image VkImageViewCreateInfo image_view_ci = {}; image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_ci.image = image; image_view_ci.format = depth_format; image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_ci.subresourceRange.layerCount = 1; image_view_ci.subresourceRange.baseArrayLayer = 0; image_view_ci.subresourceRange.levelCount = 1; // Setting both depth & stencil aspect bits is illegal for an imageView used // to populate a descriptor set. image_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; VkImageView image_view; err = vkCreateImageView(m_device->device(), &image_view_ci, NULL, &image_view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo img_info = {}; img_info.imageView = image_view; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = NULL; descriptor_write.pBufferInfo = NULL; descriptor_write.pImageInfo = &img_info; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; descriptor_write.dstSet = ds.set_; // TODO(whenning42): Update this check to look for a VUID when this error is // assigned one. 
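    // Until a VUID is assigned, the layer reports this case with a plain-text message, so the test matches on the
    // message text below rather than on a "VUID-..." string.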
const char *error_msg = " please only set either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT "; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_msg); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), image_view, NULL); } TEST_F(VkLayerTest, DSTypeMismatch) { // Create DS w/ layout of one type and attempt Update w/ mis-matched type VkResult err; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #0 with type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER but update type is VK_DESCRIPTOR_TYPE_SAMPLER"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.descriptorCount = 1; // This is a mismatched type for the layout which expects BUFFER descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, DSUpdateOutOfBounds) { // For overlapping Update, have arrayIndex exceed that of layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); if (!buffer_test.GetBufferCurrent()) { // Something prevented creation of buffer so abort printf("%s Buffer creation failed, skipping test\n", kSkipPrefix); return; } // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer_test.GetBuffer(); buff_info.offset = 0; buff_info.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstArrayElement = 1; /* This index out of bounds for the update */ descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buff_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidDSUpdateIndex) { // Create layout w/ count of 1 and attempt update to that layout w/ binding index 2 VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00315"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; 
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 2;
    descriptor_write.descriptorCount = 1;
    // This is the wrong type, but out of bounds will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, DSUpdateEmptyBinding) {
    // Create layout w/ empty binding and attempt to update it
    VkResult err;

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_SAMPLER, 0 /* !! */, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo info = {};
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;  // Lie here to avoid parameter_validation error
    // This is the wrong type, but empty binding error will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00316");
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, InvalidDSUpdateStruct) {
    // Call UpdateDS w/ struct type other than valid VK_STRUCTURE_TYPE_UPDATE_*
    // types
    VkResult err;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, ".sType must be VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET");

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo info = {};
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; /* Intentionally broken struct type */
    descriptor_write.dstSet = ds.set_;
    descriptor_write.descriptorCount = 1;
    // This is the wrong type, but out of bounds will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, SampleDescriptorUpdateError) {
    // Create a single Sampler descriptor and send it an invalid Sampler
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00325");

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    VkSampler sampler = (VkSampler)((size_t)0xbaadbeef);  //
Sampler with invalid handle VkDescriptorImageInfo descriptor_info; memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo)); descriptor_info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &descriptor_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageViewDescriptorUpdateError) { // Create a single combined Image/Sampler descriptor and send it an invalid // imageView VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00326"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkImageView view = (VkImageView)((size_t)0xbaadbeef); // invalid imageView object VkDescriptorImageInfo descriptor_info; memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo)); descriptor_info.sampler = sampler; descriptor_info.imageView = view; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &descriptor_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, CopyDescriptorUpdateErrors) { // Create DS w/ layout of 2 types, write update 1 and attempt to copy-update // into the other VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #1 with type VK_DESCRIPTOR_TYPE_SAMPLER. 
Types do not match."); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(VkWriteDescriptorSet)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 1; // SAMPLER binding from layout above
descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &info;
// This write update should succeed
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Now perform a copy update that fails due to type mismatch
VkCopyDescriptorSet copy_ds_update; memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 1; // Copy from SAMPLER binding
copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; // ERROR : copy to UNIFORM binding
copy_ds_update.descriptorCount = 1; // copy 1 descriptor
vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound();
// Now perform a copy update that fails due to binding out of bounds
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " does not have copy update src binding of 3."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 3; // ERROR : Invalid binding for matching layout
copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 1; // Copy 1 descriptor
vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound();
// Now perform a copy update that fails because the update oversteps the number of descriptors in the set
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding#1 with offset index of 1 plus update array offset of 0 and update of 5 " "descriptors oversteps total number of descriptors in set: 2."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 1; copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 5; // ERROR copy 5 descriptors (out of bounds for layout)
vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); }
TEST_F(VkPositiveLayerTest, CopyNonupdatedDescriptors) { TEST_DESCRIPTION("Copy non-updated descriptors"); unsigned int i; ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet src_ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}, {2, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); OneOffDescriptorSet dst_ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}, }); 
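// The copies below read bindings that were never written; validation should accept this without errors.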
m_errorMonitor->ExpectSuccess(); const unsigned int copy_size = 2; VkCopyDescriptorSet copy_ds_update[copy_size]; memset(copy_ds_update, 0, sizeof(copy_ds_update)); for (i = 0; i < copy_size; i++) { copy_ds_update[i].sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update[i].srcSet = src_ds.set_; copy_ds_update[i].srcBinding = i; copy_ds_update[i].dstSet = dst_ds.set_; copy_ds_update[i].dstBinding = i; copy_ds_update[i].descriptorCount = 1; } vkUpdateDescriptorSets(m_device->device(), 0, NULL, copy_size, copy_ds_update); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, NumSamplesMismatch) { // Create CommandBuffer where MSAA samples doesn't match RenderPass // sampleCount m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Num samples mismatch! "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); m_errorMonitor->SetUnexpectedError("VUID-VkGraphicsPipelineCreateInfo-subpass-00757"); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // Render triangle (the error should trigger on the attempt to draw). m_commandBuffer->Draw(3, 1, 0, 0); // Finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DrawWithPipelineIncompatibleWithRenderPass) { TEST_DESCRIPTION( "Hit RenderPass incompatible cases. 
Initial case is drawing with an active renderpass that's not compatible with the bound " "pipeline state object's creation renderpass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices // Create a renderpass that will be incompatible with default renderpass VkAttachmentReference color_att = {}; color_att.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_att; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; // Format incompatible with PSO RP color attach format B8G8R8A8_UNORM attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {{0, 0}, {64, 64}}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), rp); VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = rp; cbii.subpass = 0; VkCommandBufferBeginInfo cbbi = {}; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pInheritanceInfo = &cbii; vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-renderPass-00435"); // Render triangle (the error should trigger on the attempt to draw). m_commandBuffer->Draw(3, 1, 0, 0); // Finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, NumBlendAttachMismatch) { // Create Pipeline where the number of blend attachments doesn't match the // number of color attachments. In this case, we don't add any color // blend attachments even though we have a color attachment. 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, Bad2DArrayImageType) { TEST_DESCRIPTION("Create an image with a flag specifying 2D_ARRAY_COMPATIBLE but not of imageType 3D."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s %s is not supported; skipping\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Trigger check by setting imagecreateflags to 2d_array_compat and imageType to 2D VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR, VK_IMAGE_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {32, 32, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00950"); VkImage image; vkCreateImage(m_device->device(), &ici, NULL, &image); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, Maint1BindingSliceOf3DImage) { TEST_DESCRIPTION( "Attempt to bind a slice of a 3D texture in a descriptor set. 
This is explicitly disallowed by KHR_maintenance1 to keep " "things simple for drivers."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s %s is not supported; skipping\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkResult err; OneOffDescriptorSet set(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {32, 32, 32}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&ici); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); // Meat of the test. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageView-00343"); VkDescriptorImageInfo dii = {VK_NULL_HANDLE, view, VK_IMAGE_LAYOUT_GENERAL}; VkWriteDescriptorSet write = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, set.set_, 0, 0, 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &dii, nullptr, nullptr}; vkUpdateDescriptorSets(m_device->device(), 1, &write, 0, nullptr); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkLayerTest, MissingClearAttachment) { TEST_DESCRIPTION("Points to a wrong colorAttachment index in a VkClearAttachment structure passed to vkCmdClearAttachments"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-aspectMask-02501"); VKTriangleTest(BsoFailCmdClearAttachments); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, ConfirmNoVLErrorWhenVkCmdClearAttachmentsCalledInSecondaryCB) { TEST_DESCRIPTION( "This test is to verify that when vkCmdClearAttachments is called by a secondary commandbuffer, the validation layers do " "not throw an error if the primary commandbuffer begins a renderpass before executing the secondary commandbuffer."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferBeginInfo info = {}; VkCommandBufferInheritanceInfo hinfo = {}; info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.pInheritanceInfo = &hinfo; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; hinfo.pNext = NULL; hinfo.renderPass = renderPass(); hinfo.subpass = 0; hinfo.framebuffer = m_framebuffer; hinfo.occlusionQueryEnable = VK_FALSE; hinfo.queryFlags = 0; hinfo.pipelineStatistics = 0; secondary.begin(&info); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; 
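// The secondary command buffer records a clear of color attachment 0; the primary begins the render pass before executing it, so no error is expected.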
color_attachment.clearValue.color.float32[0] = 0.0; color_attachment.clearValue.color.float32[1] = 0.0; color_attachment.clearValue.color.float32[2] = 0.0; color_attachment.clearValue.color.float32[3] = 0.0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1}; vkCmdClearAttachments(secondary.handle(), 1, &color_attachment, 1, &clear_rect); secondary.end(); // Modify clear rect here to verify that it doesn't cause validation error clear_rect = {{{0, 0}, {99999999, 99999999}}, 0, 0}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CmdClearAttachmentTests) { TEST_DESCRIPTION("Various tests for validating usage of vkCmdClearAttachments"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); // We shouldn't need a fragment shader but add it to be able to run // on more devices VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Main thing we care about for this test is that the VkImage obj we're // clearing matches Color Attachment of FB // Also pass down other dummy params to keep driver and paramchecker happy VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 1.0; color_attachment.clearValue.color.float32[1] = 1.0; color_attachment.clearValue.color.float32[2] = 1.0; color_attachment.clearValue.color.float32[3] = 1.0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1}; // Call for full-sized FB Color attachment prior to issuing a Draw m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "vkCmdClearAttachments() issued on command buffer object "); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); clear_rect.rect.extent.width = renderPassBeginInfo().renderArea.extent.width + 4; clear_rect.rect.extent.height = clear_rect.rect.extent.height / 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); // baseLayer >= view layers clear_rect.rect.extent.width = 
(uint32_t)m_width; clear_rect.baseArrayLayer = 1; clear_rect.layerCount = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); // baseLayer + layerCount > view layers clear_rect.rect.extent.width = (uint32_t)m_width; clear_rect.baseArrayLayer = 0; clear_rect.layerCount = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, VtxBufferBadIndex) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "but no vertex buffers are attached to this Pipeline State Object"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Don't care about actual data, just need to get to draw to flag error static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)0, 1); // VBO idx 1, but no VBO in PSO m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, InvalidQueryPoolCreate) { TEST_DESCRIPTION("Attempt to create a query pool for PIPELINE_STATISTICS without enabling pipeline stats for the device."); ASSERT_NO_FATAL_FAILURE(Init()); vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props); VkDevice local_device; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); // Intentionally disable pipeline stats features.pipelineStatisticsQuery = VK_FALSE; device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = queue_info.size(); device_create_info.pQueueCreateInfos = queue_info.data(); device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; 
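// The device below is created with pipelineStatisticsQuery disabled, so creating a PIPELINE_STATISTICS query pool on it must fail.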
device_create_info.pEnabledFeatures = &features; VkResult err = vkCreateDevice(gpu(), &device_create_info, nullptr, &local_device); ASSERT_VK_SUCCESS(err); VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; qpci.queryCount = 1; VkQueryPool query_pool; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkQueryPoolCreateInfo-queryType-00791"); vkCreateQueryPool(local_device, &qpci, nullptr, &query_pool); m_errorMonitor->VerifyFound(); vkDestroyDevice(local_device, nullptr); } TEST_F(VkLayerTest, UnclosedQuery) { TEST_DESCRIPTION("End a command buffer with a query still in progress."); const char *invalid_query = "Ending command buffer with in progress query: queryPool 0x"; ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_query); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info = {}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0 /*startQuery*/, 1 /*queryCount*/); vkCmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkDestroyEvent(m_device->device(), event, nullptr); } TEST_F(VkLayerTest, QueryPreciseBit) { TEST_DESCRIPTION("Check for correct Query Precise Bit circumstances."); ASSERT_NO_FATAL_FAILURE(Init()); // These tests require that the device support pipeline statistics query VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); if (VK_TRUE != device_features.pipelineStatisticsQuery) { printf("%s Test requires unsupported pipelineStatisticsQuery feature. 
Skipped.\n", kSkipPrefix); return; }
std::vector<const char *> device_extension_names; auto features = m_device->phy().features();
// Test for precise bit when query type is not OCCLUSION
if (features.occlusionQueryPrecise) { VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->handle(), &event_create_info, nullptr, &event); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginQuery-queryType-00800"); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info = {}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->handle(), &query_pool_create_info, nullptr, &query_pool); vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1); vkCmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT); vkCmdEndQuery(m_commandBuffer->handle(), query_pool, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); vkDestroyQueryPool(m_device->handle(), query_pool, nullptr); vkDestroyEvent(m_device->handle(), event, nullptr); }
// Test for precise bit when precise feature is not available
features.occlusionQueryPrecise = false; VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_; VkCommandPool command_pool; vkCreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool); VkCommandBufferAllocateInfo cmd = {}; cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmd.pNext = NULL; cmd.commandPool = command_pool; cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmd.commandBufferCount = 1; VkCommandBuffer cmd_buffer; VkResult err = vkAllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer); ASSERT_VK_SUCCESS(err); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(test_device.handle(), &event_create_info, nullptr, &event); VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr}; vkBeginCommandBuffer(cmd_buffer, &begin_info); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBeginQuery-queryType-00800"); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info = {}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_create_info.queryCount = 1; vkCreateQueryPool(test_device.handle(), &query_pool_create_info, nullptr, &query_pool); vkCmdResetQueryPool(cmd_buffer, query_pool, 0, 1); vkCmdBeginQuery(cmd_buffer, query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT); vkCmdEndQuery(cmd_buffer, query_pool, 0); m_errorMonitor->VerifyFound(); vkEndCommandBuffer(cmd_buffer); vkDestroyQueryPool(test_device.handle(), query_pool, nullptr); vkDestroyEvent(test_device.handle(), event, nullptr); vkDestroyCommandPool(test_device.handle(), command_pool, nullptr); }
TEST_F(VkLayerTest, VertexBufferInvalid) { TEST_DESCRIPTION( "Submit a command buffer using deleted vertex buffer, delete a buffer twice, use an invalid offset for each buffer type, " "and attempt to bind a null 
buffer"); const char *deleted_buffer_in_command_buffer = "Cannot submit cmd buffer using deleted buffer "; const char *invalid_offset_message = "VUID-vkBindBufferMemory-memoryOffset-01036"; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = nullptr; const VkPipelineLayoutObj pipeline_layout(m_device); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); { // Create and bind a vertex buffer in a reduced scope, which will cause // it to be deleted upon leaving this scope const float vbo_data[3] = {1.f, 0.f, 1.f}; VkVerticesObj draw_verticies(m_device, 1, 1, sizeof(vbo_data[0]), sizeof(vbo_data) / sizeof(vbo_data[0]), vbo_data); draw_verticies.BindVertexBuffers(m_commandBuffer->handle()); draw_verticies.AddVertexInputToPipe(pipe); } m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, deleted_buffer_in_command_buffer); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); { // Create and bind a vertex buffer in a reduced scope, and delete it // twice, the second through the destructor VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eDoubleDelete); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBuffer-buffer-parameter"); buffer_test.TestDoubleDestroy(); } m_errorMonitor->VerifyFound(); m_errorMonitor->SetUnexpectedError("value of pCreateInfo->usage must not be 0"); if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidMemoryOffset)) { // Create and bind a memory buffer with an invalid offset. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_offset_message); m_errorMonitor->SetUnexpectedError( "If buffer was created with the VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT or VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, " "memoryOffset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VkBufferTest::eInvalidMemoryOffset); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to bind a null buffer. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkBindBufferMemory: required parameter buffer specified as VK_NULL_HANDLE"); VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindNullBuffer); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to bind a fake buffer. 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-parameter"); VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindFakeBuffer); (void)buffer_test; m_errorMonitor->VerifyFound(); } {
// Attempt to use an invalid handle to delete a buffer.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeMemory-memory-parameter"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eFreeInvalidHandle); (void)buffer_test; } m_errorMonitor->VerifyFound(); }
TEST_F(VkLayerTest, BadVertexBufferOffset) { TEST_DESCRIPTION("Submit an offset past the end of a vertex buffer"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindVertexBuffers-pOffsets-00626"); m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)(3 * sizeof(float)), 1); // Offset at the end of the buffer
m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); }
TEST_F(VkLayerTest, InvalidVertexAttributeAlignment) { TEST_DESCRIPTION("Check for proper alignment of attribAddress which depends on a bound pipeline and on a bound vertex buffer"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); struct VboEntry { uint16_t input0[2]; uint32_t input1; float input2[4]; }; const unsigned vbo_entry_count = 3; const VboEntry vbo_data[vbo_entry_count] = {}; VkConstantBufferObj vbo(m_device, static_cast<int>(sizeof(VboEntry) * vbo_entry_count), reinterpret_cast<const void *>(vbo_data), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); VkVertexInputBindingDescription input_binding; input_binding.binding = 0; input_binding.stride = sizeof(VboEntry); input_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; VkVertexInputAttributeDescription input_attribs[3]; input_attribs[0].binding = 0;
// Location switch between attrib[0] and attrib[1] is intentional
input_attribs[0].location = 1; input_attribs[0].format = VK_FORMAT_A8B8G8R8_UNORM_PACK32; input_attribs[0].offset = offsetof(VboEntry, input1); input_attribs[1].binding = 0; input_attribs[1].location = 0; input_attribs[1].format = VK_FORMAT_R16G16_UNORM; input_attribs[1].offset = offsetof(VboEntry, input0); input_attribs[2].binding = 0; input_attribs[2].location = 2; input_attribs[2].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[2].offset = offsetof(VboEntry, input2); char const *vsSource = "#version 450\n" "\n" "layout(location = 0) in vec2 input0;" "layout(location = 1) in vec4 input1;" "layout(location = 2) in vec4 input2;" "\n" "void main(){\n" " gl_Position = input1 + input2;\n" " gl_Position.xy += input0;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe1(m_device); pipe1.AddDefaultColorAttachment(); pipe1.AddShader(&vs); pipe1.AddShader(&fs); pipe1.AddVertexInputBindings(&input_binding, 1); pipe1.AddVertexInputAttribs(&input_attribs[0], 3); pipe1.SetViewport(m_viewports); 
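// pipe1 keeps the natural stride of sizeof(VboEntry); pipe2 below reuses the same inputs with a deliberately bad stride so attributes land misaligned.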
pipe1.SetScissor(m_scissors); pipe1.CreateVKPipeline(pipeline_layout.handle(), renderPass()); input_binding.stride = 6; VkPipelineObj pipe2(m_device); pipe2.AddDefaultColorAttachment(); pipe2.AddShader(&vs); pipe2.AddShader(&fs); pipe2.AddVertexInputBindings(&input_binding, 1); pipe2.AddVertexInputAttribs(&input_attribs[0], 3); pipe2.SetViewport(m_viewports); pipe2.SetScissor(m_scissors); pipe2.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Test with invalid buffer offset
VkDeviceSize offset = 1; vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe1.handle()); vkCmdBindVertexBuffers(m_commandBuffer->handle(), 0, 1, &vbo.handle(), &offset); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 0"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 1"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 2"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound();
// Test with invalid buffer stride
offset = 0; vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe2.handle()); vkCmdBindVertexBuffers(m_commandBuffer->handle(), 0, 1, &vbo.handle(), &offset); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 0");
// Attribute[1] is aligned properly even with a wrong stride
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid attribAddress alignment for vertex attribute 2"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); }
TEST_F(VkLayerTest, InvalidVertexBindingDescriptions) { TEST_DESCRIPTION( "Attempt to create a graphics pipeline where:" "1) count of vertex bindings exceeds device's maxVertexInputBindings limit" "2) requested bindings include a duplicate binding value"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); const uint32_t binding_count = m_device->props.limits.maxVertexInputBindings + 1; std::vector<VkVertexInputBindingDescription> input_bindings(binding_count); for (uint32_t i = 0; i < binding_count; ++i) { input_bindings[i].binding = i; input_bindings[i].stride = 4; input_bindings[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; }
// Let the last binding description use same binding as the first one
input_bindings[binding_count - 1].binding = 0; VkVertexInputAttributeDescription input_attrib; input_attrib.binding = 0; input_attrib.location = 0; input_attrib.format = VK_FORMAT_R32G32B32_SFLOAT; input_attrib.offset = 0; VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings.data(), binding_count); pipe.AddVertexInputAttribs(&input_attrib, 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-vertexBindingDescriptionCount-00613"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-00616"); 
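// Both expected errors (binding count over the device limit and a duplicate binding value) come from the single pipeline creation call below.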
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); }
TEST_F(VkLayerTest, InvalidVertexAttributeDescriptions) { TEST_DESCRIPTION( "Attempt to create a graphics pipeline where:" "1) count of vertex attributes exceeds device's maxVertexInputAttributes limit" "2) requested locations include a duplicate location value" "3) binding used by one attribute is not defined by a binding description"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); VkVertexInputBindingDescription input_binding; input_binding.binding = 0; input_binding.stride = 4; input_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; const uint32_t attribute_count = m_device->props.limits.maxVertexInputAttributes + 1; std::vector<VkVertexInputAttributeDescription> input_attribs(attribute_count); for (uint32_t i = 0; i < attribute_count; ++i) { input_attribs[i].binding = 0; input_attribs[i].location = i; input_attribs[i].format = VK_FORMAT_R32G32B32_SFLOAT; input_attribs[i].offset = 0; }
// Let the last input_attribs description use same location as the first one
input_attribs[attribute_count - 1].location = 0;
// Let the last input_attribs description use binding which is not defined
input_attribs[attribute_count - 1].binding = 1; VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs.data(), attribute_count); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-vertexAttributeDescriptionCount-00614"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineVertexInputStateCreateInfo-pVertexAttributeDescriptions-00617"); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); }
// INVALID_IMAGE_LAYOUT tests (one other case is hit by MapMemWithoutHostVisibleBit and not here)
TEST_F(VkLayerTest, InvalidImageLayout) { TEST_DESCRIPTION( "Hit all possible validation checks associated with the UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout error. " "Generally these involve having images in the wrong layout when they're copied or transitioned.");
// 3 in ValidateCmdBufImageLayouts
// * -1 Attempt to submit cmd buf w/ deleted image
// * -2 Cmd buf submit of image w/ layout not matching first use w/ subresource
// * -3 Cmd buf submit of image w/ layout not matching first use w/o subresource
ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } // Create src & dst images to use for copy operations VkImage src_image; VkImage dst_image; VkImage depth_image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 4; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_create_info.flags = 0; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &src_image); ASSERT_VK_SUCCESS(err); image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dst_image); ASSERT_VK_SUCCESS(err); image_create_info.format = VK_FORMAT_D16_UNORM; image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &depth_image); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryRequirements img_mem_reqs = {}; VkMemoryAllocateInfo mem_alloc = {}; VkDeviceMemory src_image_mem, dst_image_mem, depth_image_mem; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), src_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; bool pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &src_image_mem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dst_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &dst_image_mem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), depth_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &depth_image_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), src_image, src_image_mem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dst_image, dst_image_mem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), depth_image, depth_image_mem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.srcOffset.x = 0; copy_region.srcOffset.y = 0; copy_region.srcOffset.z = 0; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.mipLevel = 0; copy_region.dstSubresource.baseArrayLayer = 0; 
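// A minimal 1x1x1 copy region is sufficient; the copies below only exercise image layout validation.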
copy_region.dstSubresource.layerCount = 1; copy_region.dstOffset.x = 0; copy_region.dstOffset.y = 0; copy_region.dstOffset.z = 0; copy_region.extent.width = 1; copy_region.extent.height = 1; copy_region.extent.depth = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL."); m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound();
// The first call hits the expected WARNING and skips the call down the chain, so call a second time to call down chain and
// update layer state
m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL."); m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
// Now cause error due to src image layout changing
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00128"); m_errorMonitor->SetUnexpectedError("is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT"); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound();
// Final src error is due to bad layout type
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00129"); m_errorMonitor->SetUnexpectedError( "with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound();
// Now verify same checks for dst
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL."); m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound();
// Now cause error due to dst image layout changing
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00133"); m_errorMonitor->SetUnexpectedError( "is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region); m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00134"); m_errorMonitor->SetUnexpectedError( "with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL."); m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region); m_errorMonitor->VerifyFound();
// Convert dst and depth images to TRANSFER_DST for subsequent tests
VkImageMemoryBarrier transfer_dst_image_barrier[1] = {}; transfer_dst_image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; 
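// The same barrier struct is reused for depth_image afterwards with the aspect mask switched to DEPTH.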
transfer_dst_image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; transfer_dst_image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; transfer_dst_image_barrier[0].srcAccessMask = 0; transfer_dst_image_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; transfer_dst_image_barrier[0].image = dst_image; transfer_dst_image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers; transfer_dst_image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels; transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, transfer_dst_image_barrier); transfer_dst_image_barrier[0].image = depth_image; transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, transfer_dst_image_barrier); // Cause errors due to clearing with invalid image layouts VkClearColorValue color_clear_value = {}; VkImageSubresourceRange clear_range; clear_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; clear_range.baseMipLevel = 0; clear_range.baseArrayLayer = 0; clear_range.layerCount = 1; clear_range.levelCount = 1; // Fail due to explicitly prohibited layout for color clear (only GENERAL and TRANSFER_DST are permitted). // Since the image is currently not in UNDEFINED layout, this will emit two errors. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00005"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004"); m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_UNDEFINED, &color_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Fail due to provided layout not matching actual current layout for color clear. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004"); m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_GENERAL, &color_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); VkClearDepthStencilValue depth_clear_value = {}; clear_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Fail due to explicitly prohibited layout for depth clear (only GENERAL and TRANSFER_DST are permitted). // Since the image is currently not in UNDEFINED layout, this will emit two errors. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00012"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011"); m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_UNDEFINED, &depth_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Fail due to provided layout not matching actual current layout for depth clear. 
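// The depth image was just transitioned to TRANSFER_DST_OPTIMAL, so claiming GENERAL here is a layout mismatch.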
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011"); m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_GENERAL, &depth_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Now cause error due to bad image layout transition in PipelineBarrier VkImageMemoryBarrier image_barrier[1] = {}; image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; image_barrier[0].image = src_image; image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers; image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels; image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01197"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01210"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, image_barrier); m_errorMonitor->VerifyFound(); // Finally some layout errors at RenderPass create time // Just hacking in specific state to get to the errors we want so don't copy this unless you know what you're doing. VkAttachmentReference attach = {}; // perf warning for GENERAL layout w/ non-DS input attachment attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.inputAttachmentCount = 1; subpass.pInputAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_UNDEFINED; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-general layout attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for input attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be READ_ONLY_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); subpass.inputAttachmentCount = 0; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; attach.layout = VK_IMAGE_LAYOUT_GENERAL; // perf warning for GENERAL layout on color attachment m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-color opt or GENERAL layout for color attachment attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for color attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); 
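// TRANSFER_SRC_OPTIMAL is not a valid color attachment layout, so the render pass creation above is expected to be flagged.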
m_errorMonitor->VerifyFound(); subpass.colorAttachmentCount = 0; subpass.pDepthStencilAttachment = &attach; attach.layout = VK_IMAGE_LAYOUT_GENERAL; // perf warning for GENERAL layout on DS attachment m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "GENERAL layout for depth attachment may not give optimal performance."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-ds opt or GENERAL layout for color attachment attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for depth attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be " "DEPTH_STENCIL_ATTACHMENT_OPTIMAL, DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // For this error we need a valid renderpass so create default one attach.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; attach.attachment = 0; attach_desc.format = depth_format; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; // Can't do a CLEAR load on READ_ONLY initialLayout attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "with invalid first layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL"); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), src_image_mem, NULL); vkFreeMemory(m_device->device(), dst_image_mem, NULL); vkFreeMemory(m_device->device(), depth_image_mem, NULL); vkDestroyImage(m_device->device(), src_image, NULL); vkDestroyImage(m_device->device(), dst_image, NULL); vkDestroyImage(m_device->device(), depth_image, NULL); } TEST_F(VkLayerTest, InvalidStorageImageLayout) { TEST_DESCRIPTION("Attempt to update a STORAGE_IMAGE descriptor w/o GENERAL layout."); ASSERT_NO_FATAL_FAILURE(Init()); const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM; VkImageTiling tiling; VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), tex_format, &format_properties); if (format_properties.linearTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) { tiling = VK_IMAGE_TILING_LINEAR; } else if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) { tiling = VK_IMAGE_TILING_OPTIMAL; } else { printf("%s Device does not support VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT; skipped.\n", kSkipPrefix); return; } OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkImageObj image(m_device); image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_STORAGE_BIT, tiling, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(tex_format); VkDescriptorImageInfo image_info = {}; image_info.imageView = view; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; 
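// image_info deliberately reports SHADER_READ_ONLY_OPTIMAL; storage image descriptors must use the GENERAL layout.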
descriptor_write.pImageInfo = &image_info; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout " "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but according to spec "); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NonSimultaneousSecondaryMarksPrimary) { ASSERT_NO_FATAL_FAILURE(Init()); const char *simultaneous_use_message = "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer"; VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); secondary.end(); VkCommandBufferBeginInfo cbbi = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, nullptr, }; m_commandBuffer->begin(&cbbi); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, simultaneous_use_message); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, SimultaneousUseSecondaryTwoExecutes) { ASSERT_NO_FATAL_FAILURE(Init()); const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!"; VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo inh = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, }; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh}; secondary.begin(&cbbi); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, SimultaneousUseSecondarySingleExecute) { ASSERT_NO_FATAL_FAILURE(Init()); // variation on previous test executing the same CB twice in the same // CmdExecuteCommands call const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!"; VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo inh = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, }; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh}; secondary.begin(&cbbi); secondary.end(); m_commandBuffer->begin(); VkCommandBuffer cbs[] = {secondary.handle(), secondary.handle()}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message); vkCmdExecuteCommands(m_commandBuffer->handle(), 2, cbs); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, SimultaneousUseOneShot) { TEST_DESCRIPTION("Submit the same command buffer twice in one submit looking for simultaneous use and one time submit errors"); const char *simultaneous_use_message = "is already in use and is not marked for simultaneous use"; const char *one_shot_message = "VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted"; ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBuffer cmd_bufs[2]; VkCommandBufferAllocateInfo alloc_info; alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.commandBufferCount = 2; alloc_info.commandPool = m_commandPool->handle(); 
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs); VkCommandBufferBeginInfo cb_binfo; cb_binfo.pNext = NULL; cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cb_binfo.pInheritanceInfo = VK_NULL_HANDLE; cb_binfo.flags = 0; vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(cmd_bufs[0], 0, 1, &viewport); vkEndCommandBuffer(cmd_bufs[0]); VkCommandBuffer duplicates[2] = {cmd_bufs[0], cmd_bufs[0]}; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 2; submit_info.pCommandBuffers = duplicates; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Set one time use and now look for one time submit duplicates[0] = duplicates[1] = cmd_bufs[1]; cb_binfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT | VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo); vkCmdSetViewport(cmd_bufs[1], 0, 1, &viewport); vkEndCommandBuffer(cmd_bufs[1]); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, one_shot_message); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); } TEST_F(VkLayerTest, StageMaskGsTsEnabled) { TEST_DESCRIPTION( "Attempt to use a stageMask w/ geometry shader and tessellation shader bits enabled when those features are disabled on the " "device."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Make sure gs & ts are disabled features.geometryShader = false; features.tessellationShader = false; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_; VkCommandPool command_pool; vkCreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool); VkCommandBufferAllocateInfo cmd = {}; cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmd.pNext = NULL; cmd.commandPool = command_pool; cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmd.commandBufferCount = 1; VkCommandBuffer cmd_buffer; VkResult err = vkAllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer); ASSERT_VK_SUCCESS(err); VkEvent event; VkEventCreateInfo evci = {}; evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; VkResult result = vkCreateEvent(test_device.handle(), &evci, NULL, &event); ASSERT_VK_SUCCESS(result); VkCommandBufferBeginInfo cbbi = {}; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(cmd_buffer, &cbbi); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01150"); vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01151"); vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT); m_errorMonitor->VerifyFound(); vkDestroyEvent(test_device.handle(), event, NULL);
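// For reference (illustrative sketch, not exercised by this test): on a device where geometryShader and
// tessellationShader are enabled, the same stage bits would be legal, e.g.:
//     vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT);
//     vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
// Here both calls are expected to fail validation because those features were forced off on test_device.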
vkDestroyCommandPool(test_device.handle(), command_pool, NULL); } TEST_F(VkLayerTest, EventInUseDestroyedSignaled) { ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); VkEvent event; VkEventCreateInfo event_create_info = {}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); m_commandBuffer->end(); vkDestroyEvent(m_device->device(), event, nullptr); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound"); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InUseDestroyedSignaled) { TEST_DESCRIPTION( "Use vkCmdExecuteCommands with invalid state in primary and secondary command buffers. Delete objects that are in use. " "Call VkQueueSubmit with an event that has been deleted."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->ExpectSuccess(); VkSemaphoreCreateInfo semaphore_create_info = {}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; VkSemaphore semaphore; ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore)); VkFenceCreateInfo fence_create_info = {}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; VkFence fence; ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence)); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); VkDescriptorBufferInfo buffer_info = {}; buffer_info.buffer = buffer_test.GetBuffer(); buffer_info.offset = 0; buffer_info.range = 1024; VkWriteDescriptorSet write_descriptor_set = {}; write_descriptor_set.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_descriptor_set.dstSet = ds.set_; write_descriptor_set.descriptorCount = 1; write_descriptor_set.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; write_descriptor_set.pBufferInfo = &buffer_info; vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor_set, 0, nullptr); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); pipe.CreateVKPipeline(pipeline_layout.handle(), m_renderPass); VkEvent event; VkEventCreateInfo event_create_info = {}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); m_commandBuffer->begin(); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, NULL); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; 
submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); m_errorMonitor->Reset(); // resume logmsg processing m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyEvent-event-01145"); vkDestroyEvent(m_device->device(), event, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySemaphore-semaphore-01137"); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Fence 0x"); vkDestroyFence(m_device->device(), fence, nullptr); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If semaphore is not VK_NULL_HANDLE, semaphore must be a valid VkSemaphore handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Semaphore obj"); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->SetUnexpectedError("If fence is not VK_NULL_HANDLE, fence must be a valid VkFence handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Fence obj"); vkDestroyFence(m_device->device(), fence, nullptr); m_errorMonitor->SetUnexpectedError("If event is not VK_NULL_HANDLE, event must be a valid VkEvent handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Event obj"); vkDestroyEvent(m_device->device(), event, nullptr); } TEST_F(VkLayerTest, QueryPoolInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use query pool."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_ci{}; query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_ci.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool); m_commandBuffer->begin(); // Reset query pool to create binding with cmd buffer vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetQueryPoolResults-queryType-00818"); uint32_t data_space[16]; m_errorMonitor->SetUnexpectedError("Cannot get query results on queryPool"); vkGetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, sizeof(uint32_t), VK_QUERY_RESULT_PARTIAL_BIT); m_errorMonitor->VerifyFound(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy query pool while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyQueryPool-queryPool-00793"); vkDestroyQueryPool(m_device->handle(), query_pool, NULL); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now that cmd buffer done we can safely destroy query_pool m_errorMonitor->SetUnexpectedError("If queryPool is not VK_NULL_HANDLE, queryPool must be a valid VkQueryPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove QueryPool obj"); vkDestroyQueryPool(m_device->handle(), query_pool, NULL); } TEST_F(VkLayerTest, PipelineInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use 
pipeline."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPipelineLayoutObj pipeline_layout(m_device); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyPipeline-pipeline-00765"); // Create PSO to be used for draw-time errors below VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Store pipeline handle so we can actually delete it before test finishes VkPipeline delete_this_pipeline; { // Scope pipeline so it will be auto-deleted VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); delete_this_pipeline = pipe.handle(); m_commandBuffer->begin(); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then pipeline destroyed while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } // Pipeline deletion triggered here m_errorMonitor->VerifyFound(); // Make sure queue finished and then actually delete pipeline vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError("If pipeline is not VK_NULL_HANDLE, pipeline must be a valid VkPipeline handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Pipeline obj"); vkDestroyPipeline(m_device->handle(), delete_this_pipeline, nullptr); } TEST_F(VkLayerTest, CreateImageViewBreaksParameterCompatibilityRequirements) { TEST_DESCRIPTION( "Attempts to create an Image View with a view type that does not match the image type it is being created from."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); VkPhysicalDeviceMemoryProperties memProps; vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memProps); // Test mismatch detection for image of type VK_IMAGE_TYPE_1D VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_1D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image1D(m_device); image1D.init(&imgInfo); ASSERT_TRUE(image1D.initialized()); // Initialize VkImageViewCreateInfo with mismatched viewType VkImageView imageView; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image1D.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_2D is not compatible with image"); vkCreateImageView(m_device->device(), 
&ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Test mismatch detection for image of type VK_IMAGE_TYPE_2D imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 6, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image2D(m_device); image2D.init(&imgInfo); ASSERT_TRUE(image2D.initialized()); // Initialize VkImageViewCreateInfo with mismatched viewType ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image2D.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_3D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_3D is not compatible with image"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Change VkImageViewCreateInfo to different mismatched viewType ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE; ivci.subresourceRange.layerCount = 6; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01003"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Test mismatch detection for image of type VK_IMAGE_TYPE_3D imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image3D(m_device); image3D.init(&imgInfo); ASSERT_TRUE(image3D.initialized()); // Initialize VkImageViewCreateInfo with mismatched viewType ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image3D.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_1D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_1D is not compatible with image"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Change VkImageViewCreateInfo to different mismatched viewType ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; // Test for error message if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01005"); } else { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subResourceRange-01021"); } vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Check if the device can make the image required for this test case. 
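// (Illustrative note: the query below asks whether a 3D, optimally-tiled R8G8B8A8_UNORM color-attachment image can be
// created with the MUTABLE_FORMAT, 2D_ARRAY_COMPATIBLE and SPARSE_BINDING flags; if the driver rejects the combination,
// or sparseBinding / VK_KHR_maintenance1 are unavailable, the remainder of the test is skipped.)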
VkImageFormatProperties formProps = {{0, 0, 0}, 0, 0, 0, 0}; VkResult res = vkGetPhysicalDeviceImageFormatProperties( m_device->phy().handle(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_3D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT, &formProps); // If not, skip this part of the test. if (res || !m_device->phy().features().sparseBinding || !DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { printf("%s %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } // Initialize VkImageCreateInfo with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR and VK_IMAGE_CREATE_SPARSE_BINDING_BIT which // are incompatible create flags. imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImage imageSparse; // Creating a sparse image means we should not bind memory to it. res = vkCreateImage(m_device->device(), &imgInfo, NULL, &imageSparse); ASSERT_FALSE(res); // Initialize VkImageViewCreateInfo to create a view that will attempt to utilize VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR. ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = imageSparse; ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or " "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled."); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Clean up vkDestroyImage(m_device->device(), imageSparse, nullptr); } TEST_F(VkLayerTest, CreateImageViewFormatFeatureMismatch) { TEST_DESCRIPTION("Create view with a format that does not have the same features as the image format."); if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Failed to load device profile layer.\n", kSkipPrefix); return; } // List of features to be tested VkFormatFeatureFlagBits features[] = {VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT}; uint32_t feature_count = 4; // List of usage cases for each feature test VkImageUsageFlags usages[] = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_STORAGE_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT}; // List of errors that will be thrown in order of tests run std::string optimal_error_codes[] = { "VUID-VkImageViewCreateInfo-usage-02274", "VUID-VkImageViewCreateInfo-usage-02275", "VUID-VkImageViewCreateInfo-usage-02276", "VUID-VkImageViewCreateInfo-usage-02277", }; VkFormatProperties formatProps; // First three tests uint32_t i = 0; for (i = 0; i < (feature_count - 1); i++) { // Modify formats to have mismatched features // Format for image fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps); formatProps.optimalTilingFeatures |= features[i]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps); memset(&formatProps, 0, sizeof(formatProps)); // Format for view fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps); formatProps.optimalTilingFeatures = features[(i + 1) % feature_count]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps); // Create image with modified format VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R32G32B32A32_UINT, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, usages[i], VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&imgInfo); ASSERT_TRUE(image.initialized()); VkImageView imageView; // Initialize VkImageViewCreateInfo with modified format VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R32G32B32A32_SINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]); VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (!res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } } // Test for VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT. 
Needs special formats // Only run this test if format supported if (!ImageFormatIsSupported(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_TILING_OPTIMAL)) { printf("%s VK_FORMAT_D24_UNORM_S8_UINT format not supported - skipped.\n", kSkipPrefix); return; } // Modify formats to have mismatched features // Format for image fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, &formatProps); formatProps.optimalTilingFeatures |= features[i]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, formatProps); memset(&formatProps, 0, sizeof(formatProps)); // Format for view fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &formatProps); formatProps.optimalTilingFeatures = features[(i + 1) % feature_count]; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, formatProps); // Create image with modified format VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_D24_UNORM_S8_UINT, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, usages[i], VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&imgInfo); ASSERT_TRUE(image.initialized()); VkImageView imageView; // Initialize VkImageViewCreateInfo with modified format VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_D32_SFLOAT_S8_UINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; // Test for error message m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]); VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (!res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } } TEST_F(VkLayerTest, InvalidImageViewUsageCreateInfo) { TEST_DESCRIPTION("Usage modification via a chained VkImageViewUsageCreateInfo struct"); if (!EnableDeviceProfileLayer()) { printf("%s Test requires DeviceProfileLayer, unavailable - skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) { printf("%s Test requires API >= 1.1 or KHR_MAINTENANCE2 extension, unavailable - skipped.\n", kSkipPrefix); return; } m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Required extensions are not available.\n", kSkipPrefix); return; } VkFormatProperties formatProps; // Ensure image format claims support for sampled and storage, excludes color attachment memset(&formatProps, 0, sizeof(formatProps)); fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps); formatProps.optimalTilingFeatures |= (VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT); formatProps.optimalTilingFeatures = formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps); // Create image with sampled and storage usages VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R32G32B32A32_UINT, {1, 1, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&imgInfo); ASSERT_TRUE(image.initialized()); // Force the imageview format to exclude storage feature, include color attachment memset(&formatProps, 0, sizeof(formatProps)); fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps); formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; formatProps.optimalTilingFeatures = (formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT); fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps); VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R32G32B32A32_SINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.baseArrayLayer = 0; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // ImageView creation should fail because view format doesn't support all the underlying image's usages VkImageView imageView; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-usage-02275"); VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); // Add a chained VkImageViewUsageCreateInfo to override original image usage bits, removing storage VkImageViewUsageCreateInfo usage_ci = {VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, nullptr, VK_IMAGE_USAGE_SAMPLED_BIT}; // Link the VkImageViewUsageCreateInfo struct into the view's create info pNext chain ivci.pNext = &usage_ci; // ImageView should now succeed without error m_errorMonitor->ExpectSuccess(); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } // Try a zero usage field usage_ci.usage = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateImageView: Chained VkImageViewUsageCreateInfo usage field must not be 0"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkImageViewUsageCreateInfo: value of usage must not be 0"); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } // Try a usage field with a bit not supported by underlying image usage_ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-01587"); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), 
imageView, nullptr); } // Try an illegal bit in usage field usage_ci.usage = 0x10000000 | VK_IMAGE_USAGE_SAMPLED_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-parameter"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-GeneralParameterError-UnrecognizedValue"); res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == res) { vkDestroyImageView(m_device->device(), imageView, nullptr); } } TEST_F(VkLayerTest, ImageViewInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use imageView."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to use the sampler char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImageView-imageView-01026"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 
1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer then destroy imageView VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy imageView while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroyImageView(m_device->device(), view, nullptr); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now we can actually destroy imageView m_errorMonitor->SetUnexpectedError("If imageView is not VK_NULL_HANDLE, imageView must be a valid VkImageView handle"); m_errorMonitor->SetUnexpectedError("Unable to remove ImageView obj"); vkDestroyImageView(m_device->device(), view, NULL); vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkLayerTest, BufferViewInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use bufferView."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferView view; VkBufferViewCreateInfo bvci = {}; bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; bvci.buffer = buffer; bvci.format = VK_FORMAT_R32_SFLOAT; bvci.range = VK_WHOLE_SIZE; err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view); ASSERT_VK_SUCCESS(err); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = imageLoad(s, 0);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBufferView-bufferView-00936"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy bufferView while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroyBufferView(m_device->device(), view, nullptr); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now we can actually destroy bufferView m_errorMonitor->SetUnexpectedError("If bufferView is not VK_NULL_HANDLE, bufferView must be a valid VkBufferView handle"); m_errorMonitor->SetUnexpectedError("Unable to remove BufferView obj"); vkDestroyBufferView(m_device->device(), view, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); } TEST_F(VkLayerTest, SamplerInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete in-use sampler."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; err = vkCreateImageView(m_device->device(), &ivci, NULL, &view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; 
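// The write below attaches both the image view and the sampler to binding 0; once this set is bound and the command
// buffer is submitted, the sampler is considered in use until the queue goes idle.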
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to use the sampler char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySampler-sampler-01082"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer then destroy sampler VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy sampler while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroySampler(m_device->device(), sampler, nullptr); // Destroyed too soon m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now we can actually destroy sampler m_errorMonitor->SetUnexpectedError("If sampler is not VK_NULL_HANDLE, sampler must be a valid VkSampler handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Sampler obj"); vkDestroySampler(m_device->device(), sampler, NULL); // Destroyed for real vkDestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, UpdateDestroyDescriptorSetLayout) { TEST_DESCRIPTION("Attempt updates to descriptor sets with destroyed descriptor set layouts"); // TODO: Update to match the descriptor set layout specific VUIDs/VALIDATION_ERROR_* when present const auto kWriteDestroyedLayout = "VUID-VkWriteDescriptorSet-dstSet-00320"; const auto kCopyDstDestroyedLayout = "VUID-VkCopyDescriptorSet-dstSet-parameter"; const auto kCopySrcDestroyedLayout = "VUID-VkCopyDescriptorSet-srcSet-parameter"; ASSERT_NO_FATAL_FAILURE(Init()); // Set up the descriptor (resource) and write/copy operations to use. 
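// Two update paths are exercised: VkWriteDescriptorSet writes the uniform buffer directly into a set's binding 0,
// while VkCopyDescriptorSet copies binding 0 from a source set into a destination set. The dstSet/srcSet handles
// are filled in per case below.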
float data[16] = {}; VkConstantBufferObj buffer(m_device, sizeof(data), data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); ASSERT_TRUE(buffer.initialized()); VkDescriptorBufferInfo info = {}; info.buffer = buffer.handle(); info.range = VK_WHOLE_SIZE; VkWriteDescriptorSet write_descriptor = {}; write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_descriptor.dstSet = VK_NULL_HANDLE; // must update this write_descriptor.dstBinding = 0; write_descriptor.descriptorCount = 1; write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; write_descriptor.pBufferInfo = &info; VkCopyDescriptorSet copy_descriptor = {}; copy_descriptor.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_descriptor.srcSet = VK_NULL_HANDLE; // must update copy_descriptor.srcBinding = 0; copy_descriptor.dstSet = VK_NULL_HANDLE; // must update copy_descriptor.dstBinding = 0; copy_descriptor.descriptorCount = 1; // Create valid and invalid source and destination descriptor sets std::vector<VkDescriptorSetLayoutBinding> one_uniform_buffer = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }; OneOffDescriptorSet good_dst(m_device, one_uniform_buffer); ASSERT_TRUE(good_dst.Initialized()); OneOffDescriptorSet bad_dst(m_device, one_uniform_buffer); // Must assert before invalidating it below ASSERT_TRUE(bad_dst.Initialized()); bad_dst.layout_ = VkDescriptorSetLayoutObj(); OneOffDescriptorSet good_src(m_device, one_uniform_buffer); ASSERT_TRUE(good_src.Initialized()); // Put valid data in the good and bad sources, simultaneously doing a positive test on write and copy operations m_errorMonitor->ExpectSuccess(); write_descriptor.dstSet = good_src.set_; vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL); m_errorMonitor->VerifyNotFound(); OneOffDescriptorSet bad_src(m_device, one_uniform_buffer); ASSERT_TRUE(bad_src.Initialized()); // to complete our positive testing use copy, where above we used write. copy_descriptor.srcSet = good_src.set_; copy_descriptor.dstSet = bad_src.set_; vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); bad_src.layout_ = VkDescriptorSetLayoutObj(); m_errorMonitor->VerifyNotFound(); // Trigger the three invalid use errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kWriteDestroyedLayout); write_descriptor.dstSet = bad_dst.set_; vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopyDstDestroyedLayout); copy_descriptor.dstSet = bad_dst.set_; vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopySrcDestroyedLayout); copy_descriptor.srcSet = bad_src.set_; copy_descriptor.dstSet = good_dst.set_; vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, QueueForwardProgressFenceWait) { TEST_DESCRIPTION( "Call VkQueueSubmit with a semaphore that is already signaled but not waited on by the queue.
Wait on a fence that has not " "yet been submitted to a queue."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *queue_forward_progress_message = " that was previously signaled by queue 0x"; const char *invalid_fence_wait_message = " which has not been submitted on a Queue or during acquire next image."; VkCommandBufferObj cb1(m_device, m_commandPool); cb1.begin(); cb1.end(); VkSemaphoreCreateInfo semaphore_create_info = {}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; VkSemaphore semaphore; ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore)); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cb1.handle(); submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_commandBuffer->begin(); m_commandBuffer->end(); submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, queue_forward_progress_message); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); VkFenceCreateInfo fence_create_info = {}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; VkFence fence; ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence)); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, invalid_fence_wait_message); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); m_errorMonitor->VerifyFound(); vkDeviceWaitIdle(m_device->device()); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); } TEST_F(VkLayerTest, FramebufferIncompatible) { TEST_DESCRIPTION( "Bind a secondary command buffer with a framebuffer that does not match the framebuffer for the active renderpass."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. 
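// Framebuffer/render pass compatibility hinges on matching attachment counts, formats, and sample counts, so a 32x32
// B8G8R8A8_UNORM color target satisfies the single-attachment render pass rp created above. The error exercised below
// comes from the secondary command buffer inheriting this framebuffer while the primary's render pass instance uses
// the framework's default framebuffer.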
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cbai = {}; cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cbai.commandPool = m_commandPool->handle(); cbai.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; cbai.commandBufferCount = 1; VkCommandBuffer sec_cb; err = vkAllocateCommandBuffers(m_device->device(), &cbai, &sec_cb); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo cbbi = {}; VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = renderPass(); cbii.framebuffer = fb; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pNext = NULL; cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; cbbi.pInheritanceInfo = &cbii; vkBeginCommandBuffer(sec_cb, &cbbi); vkEndCommandBuffer(sec_cb); VkCommandBufferBeginInfo cbbi2 = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr}; vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi2); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is not the same as the primary command buffer's current active framebuffer "); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &sec_cb); m_errorMonitor->VerifyFound(); // Cleanup vkCmdEndRenderPass(m_commandBuffer->handle()); vkEndCommandBuffer(m_commandBuffer->handle()); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); vkDestroyFramebuffer(m_device->device(), fb, NULL); } TEST_F(VkLayerTest, RenderPassMissingAttachment) { TEST_DESCRIPTION("Begin render pass with missing framebuffer attachment"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create a renderPass with a single color attachment VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); ASSERT_VK_SUCCESS(err); auto createView = lvl_init_struct<VkImageViewCreateInfo>(); createView.image = m_renderTargets[0]->handle(); createView.viewType = VK_IMAGE_VIEW_TYPE_2D; createView.format = VK_FORMAT_B8G8R8A8_UNORM; createView.components.r =
VK_COMPONENT_SWIZZLE_R; createView.components.g = VK_COMPONENT_SWIZZLE_G; createView.components.b = VK_COMPONENT_SWIZZLE_B; createView.components.a = VK_COMPONENT_SWIZZLE_A; createView.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; createView.flags = 0; VkImageView iv; vkCreateImageView(m_device->handle(), &createView, nullptr, &iv); auto fb_info = lvl_init_struct(); fb_info.renderPass = rp; fb_info.attachmentCount = 1; fb_info.pAttachments = &iv; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; // Create the framebuffer then destory the view it uses. VkFramebuffer fb; err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); vkDestroyImageView(device(), iv, NULL); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassBeginInfo-framebuffer-parameter"); auto rpbi = lvl_init_struct(); rpbi.renderPass = rp; rpbi.framebuffer = fb; rpbi.renderArea = {{0, 0}, {32, 32}}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); // Don't call vkCmdEndRenderPass; as the begin has been "skipped" based on the error condition m_errorMonitor->VerifyFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, ColorBlendInvalidLogicOp) { TEST_DESCRIPTION("Attempt to use invalid VkPipelineColorBlendStateCreateInfo::logicOp value."); ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().logicOp) { printf("%s Device does not support logicOp feature; skipped.\n", kSkipPrefix); return; } const auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; helper.cb_ci_.logicOp = static_cast(VK_LOGIC_OP_END_RANGE + 1); // invalid logicOp to be tested }; CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00607"); } TEST_F(VkLayerTest, ColorBlendUnsupportedLogicOp) { TEST_DESCRIPTION("Attempt enabling VkPipelineColorBlendStateCreateInfo::logicOpEnable when logicOp feature is disabled."); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; }; CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606"); } TEST_F(VkLayerTest, ColorBlendUnsupportedDualSourceBlend) { TEST_DESCRIPTION("Attempt to use dual-source blending when dualSrcBlend feature is disabled."); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto set_dsb_src_color_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR; // bad! 
helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608"); const auto set_dsb_dst_color_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR; // bad helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609"); const auto set_dsb_src_alpha_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC1_ALPHA; // bad helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_src_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610"); const auto set_dsb_dst_alpha_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA; // bad! 
helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611"); } #if GTEST_IS_THREADSAFE struct thread_data_struct { VkCommandBuffer commandBuffer; VkDevice device; VkEvent event; bool bailout; }; extern "C" void *AddToCommandBuffer(void *arg) { struct thread_data_struct *data = (struct thread_data_struct *)arg; for (int i = 0; i < 80000; i++) { vkCmdSetEvent(data->commandBuffer, data->event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); if (data->bailout) { break; } } return NULL; } TEST_F(VkLayerTest, ThreadCommandBufferCollision) { test_platform_thread thread; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Calls AllocateCommandBuffers VkCommandBufferObj commandBuffer(m_device, m_commandPool); commandBuffer.begin(); VkEventCreateInfo event_info; VkEvent event; VkResult err; memset(&event_info, 0, sizeof(event_info)); event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; err = vkCreateEvent(device(), &event_info, NULL, &event); ASSERT_VK_SUCCESS(err); err = vkResetEvent(device(), event); ASSERT_VK_SUCCESS(err); struct thread_data_struct data; data.commandBuffer = commandBuffer.handle(); data.event = event; data.bailout = false; m_errorMonitor->SetBailout(&data.bailout); // First do some correct operations using multiple threads. // Add many entries to command buffer from another thread. test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data); // Make non-conflicting calls from this thread at the same time. for (int i = 0; i < 80000; i++) { uint32_t count; vkEnumeratePhysicalDevices(instance(), &count, NULL); } test_platform_thread_join(thread, NULL); // Then do some incorrect operations using multiple threads. // Add many entries to command buffer from another thread. test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data); // Add many entries to command buffer from this thread at the same time. 
    AddToCommandBuffer(&data);

    test_platform_thread_join(thread, NULL);
    commandBuffer.end();
    m_errorMonitor->SetBailout(NULL);
    m_errorMonitor->VerifyFound();

    vkDestroyEvent(device(), event, NULL);
}
#endif  // GTEST_IS_THREADSAFE

TEST_F(VkLayerTest, InvalidSPIRVCodeSize) {
    TEST_DESCRIPTION("Test that errors are produced for SPIR-V modules with invalid code sizes");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V header");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkShaderModule module;
    VkShaderModuleCreateInfo moduleCreateInfo;
    struct icd_spv_header spv;

    spv.magic = ICD_SPV_MAGIC;
    spv.version = ICD_SPV_VERSION;
    spv.gen_magic = 0;

    moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    moduleCreateInfo.pNext = NULL;
    moduleCreateInfo.pCode = (const uint32_t *)&spv;
    moduleCreateInfo.codeSize = 4;
    moduleCreateInfo.flags = 0;
    vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module);

    m_errorMonitor->VerifyFound();

    char const *vsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) out float x;\n"
        "void main(){\n"
        "   gl_Position = vec4(1);\n"
        "   x = 0;\n"
        "}\n";

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01376");
    std::vector<unsigned int> shader;
    VkShaderModuleCreateInfo module_create_info;
    VkShaderModule shader_module;
    module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    module_create_info.pNext = NULL;
    this->GLSLtoSPV(VK_SHADER_STAGE_VERTEX_BIT, vsSource, shader);
    module_create_info.pCode = shader.data();
    // Introduce failure by making codeSize a non-multiple of 4
    module_create_info.codeSize = shader.size() * sizeof(unsigned int) - 1;
    module_create_info.flags = 0;
    vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidSPIRVMagic) {
    TEST_DESCRIPTION("Test that an error is produced for a spirv module with a bad magic number");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V magic number");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkShaderModule module;
    VkShaderModuleCreateInfo moduleCreateInfo;
    struct icd_spv_header spv;

    spv.magic = (uint32_t)~ICD_SPV_MAGIC;
    spv.version = ICD_SPV_VERSION;
    spv.gen_magic = 0;

    moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    moduleCreateInfo.pNext = NULL;
    moduleCreateInfo.pCode = (const uint32_t *)&spv;
    moduleCreateInfo.codeSize = sizeof(spv) + 16;
    moduleCreateInfo.flags = 0;
    vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineVertexOutputNotConsumed) {
    TEST_DESCRIPTION("Test that a warning is produced for a vertex output that is not consumed by the fragment stage");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "not consumed by fragment shader");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) out float x;\n"
        "void main(){\n"
        "   gl_Position = vec4(1);\n"
        "   x = 0;\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) out vec4 color;\n"
        "void main(){\n"
        "   color = vec4(1);\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddDefaultColorAttachment();
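    // The vertex shader above writes 'x' at location 0, but the fragment shader declares no matching input,
    // so pipeline creation below is expected to produce only the "not consumed by fragment shader" performance warning.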
pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineComplexTypes) { TEST_DESCRIPTION("Smoke test for complex types across VS/FS boundary"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); char const *vsSource = "#version 450\n" "void main() {}"; char const *tcsSource = "#version 450\n" "layout(vertices=3) out;\n" "struct S { int x; };\n" "layout(location=2) patch out B { S s; } b;\n" "void main() {\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" " b.s.x = 1;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "struct S { int x; };\n" "layout(location=2) patch in B { S s; } b;\n" "void main() { gl_Position = vec4(b.s.x); }\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 c;\n" "void main() { c = vec4(1); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderBadSpecialization) { TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *bad_specialization_message = "Specialization entry 0 (for constant id 0) references memory outside provided specialization data "; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout (constant_id = 0) const float r = 0.0f;\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(r,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device); VkPipelineViewportStateCreateInfo vp_state_create_info = {}; vp_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp_state_create_info.viewportCount = 1; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; vp_state_create_info.pViewports = &viewport; 
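    // Scissor state is supplied through VK_DYNAMIC_STATE_SCISSOR below, so only scissorCount is set here and
    // pScissors is left null.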
vp_state_create_info.scissorCount = 1; VkDynamicState scissor_state = VK_DYNAMIC_STATE_SCISSOR; VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info = {}; pipeline_dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; pipeline_dynamic_state_create_info.dynamicStateCount = 1; pipeline_dynamic_state_create_info.pDynamicStates = &scissor_state; VkPipelineShaderStageCreateInfo shader_stage_create_info[2] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; VkPipelineVertexInputStateCreateInfo vertex_input_create_info = {}; vertex_input_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; VkPipelineInputAssemblyStateCreateInfo input_assembly_create_info = {}; input_assembly_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_create_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineRasterizationStateCreateInfo rasterization_state_create_info = {}; rasterization_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state_create_info.pNext = nullptr; rasterization_state_create_info.lineWidth = 1.0f; rasterization_state_create_info.rasterizerDiscardEnable = true; VkPipelineColorBlendAttachmentState color_blend_attachment_state = {}; color_blend_attachment_state.blendEnable = VK_FALSE; color_blend_attachment_state.colorWriteMask = 0xf; VkPipelineColorBlendStateCreateInfo color_blend_state_create_info = {}; color_blend_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; color_blend_state_create_info.attachmentCount = 1; color_blend_state_create_info.pAttachments = &color_blend_attachment_state; VkGraphicsPipelineCreateInfo graphicspipe_create_info = {}; graphicspipe_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; graphicspipe_create_info.stageCount = 2; graphicspipe_create_info.pStages = shader_stage_create_info; graphicspipe_create_info.pVertexInputState = &vertex_input_create_info; graphicspipe_create_info.pInputAssemblyState = &input_assembly_create_info; graphicspipe_create_info.pViewportState = &vp_state_create_info; graphicspipe_create_info.pRasterizationState = &rasterization_state_create_info; graphicspipe_create_info.pColorBlendState = &color_blend_state_create_info; graphicspipe_create_info.pDynamicState = &pipeline_dynamic_state_create_info; graphicspipe_create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; graphicspipe_create_info.layout = pipeline_layout.handle(); graphicspipe_create_info.renderPass = renderPass(); VkPipelineCacheCreateInfo pipeline_cache_create_info = {}; pipeline_cache_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkPipelineCache pipelineCache; ASSERT_VK_SUCCESS(vkCreatePipelineCache(m_device->device(), &pipeline_cache_create_info, nullptr, &pipelineCache)); // This structure maps constant ids to data locations. const VkSpecializationMapEntry entry = // id, offset, size {0, 4, sizeof(uint32_t)}; // Challenge core validation by using a bogus offset. 
uint32_t data = 1; // Set up the info describing spec map and data const VkSpecializationInfo specialization_info = { 1, &entry, 1 * sizeof(float), &data, }; shader_stage_create_info[0].pSpecializationInfo = &specialization_info; VkPipeline pipeline; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_specialization_message); vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &graphicspipe_create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); vkDestroyPipelineCache(m_device->device(), pipelineCache, nullptr); } TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorTypeMismatch) { TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *descriptor_type_mismatch_message = "Type mismatch on descriptor slot 0.0 "; OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); char const *vsSource = "#version 450\n" "\n" "layout (std140, set = 0, binding = 0) uniform buf {\n" " mat4 mvp;\n" "} ubuf;\n" "void main(){\n" " gl_Position = ubuf.mvp * vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_type_mismatch_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorNotAccessible) { TEST_DESCRIPTION( "Create a pipeline in which a descriptor used by a shader stage does not include that stage in its stageFlags."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *descriptor_not_accessible_message = "Shader uses descriptor slot 0.0 "; OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT /*!*/, nullptr}, }); char const *vsSource = "#version 450\n" "\n" "layout (std140, set = 0, binding = 0) uniform buf {\n" " mat4 mvp;\n" "} ubuf;\n" "void main(){\n" " gl_Position = ubuf.mvp * vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_not_accessible_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderPushConstantNotAccessible) { TEST_DESCRIPTION( "Create a graphics pipeline in which a push constant range containing a push constant block member is not accessible from " "the current shader stage."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); 
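    // The vertex shader below reads a push constant block, but the pipeline layout's only push constant range is
    // restricted to the fragment stage, so the vertex-stage access should be reported.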
const char *push_constant_not_accessible_message = "Push constant range covering variable starting at offset 0 not accessible from stage VK_SHADER_STAGE_VERTEX_BIT"; char const *vsSource = "#version 450\n" "\n" "layout(push_constant, std430) uniform foo { float x; } consts;\n" "void main(){\n" " gl_Position = vec4(consts.x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set up a push constant range VkPushConstantRange push_constant_range = {}; // Set to the wrong stage to challenge core_validation push_constant_range.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; push_constant_range.size = 4; const VkPipelineLayoutObj pipeline_layout(m_device, {}, {push_constant_range}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, push_constant_not_accessible_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderNotEnabled) { TEST_DESCRIPTION( "Create a graphics pipeline in which a capability declared by the shader requires a feature not enabled on the device."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *feature_not_enabled_message = "Shader requires VkPhysicalDeviceFeatures::shaderFloat64 but is not enabled on the device"; // Some awkward steps are required to test with custom device features. std::vector device_extension_names; auto features = m_device->phy().features(); // Disable support for 64 bit floats features.shaderFloat64 = false; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " dvec4 green = vec4(0.0, 1.0, 0.0, 1.0);\n" " color = vec4(green);\n" "}\n"; VkShaderObj vs(&test_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(&test_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkRenderpassObj render_pass(&test_device); VkPipelineObj pipe(&test_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); const VkPipelineLayoutObj pipeline_layout(&test_device); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, feature_not_enabled_message); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateShaderModuleCheckBadCapability) { TEST_DESCRIPTION("Create a shader in which a capability declared by the shader is not supported."); // Note that this failure message comes from spirv-tools, specifically the validator. 
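    // The hand-written SPIR-V below declares OpCapability ImageRect, which is not allowed for Vulkan shader modules,
    // so the SPIR-V validator is expected to flag it at vkCreateShaderModule time.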
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    const std::string spv_source = R"(
                  OpCapability ImageRect
                  OpEntryPoint Vertex %main "main"
          %main = OpFunction %void None %3
                  OpReturn
                  OpFunctionEnd
        )";

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Capability ImageRect is not allowed by Vulkan");

    std::vector<unsigned int> spv;
    VkShaderModuleCreateInfo module_create_info;
    VkShaderModule shader_module;
    module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    module_create_info.pNext = NULL;
    ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv);
    module_create_info.pCode = spv.data();
    module_create_info.codeSize = spv.size() * sizeof(unsigned int);
    module_create_info.flags = 0;

    VkResult err = vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyShaderModule(m_device->handle(), shader_module, NULL);
    }
}

TEST_F(VkPositiveLayerTest, ShaderRelaxedBlockLayout) {
    // This is a positive test, no errors expected
    // Verifies the ability to relax block layout rules with a shader that requires them to be relaxed
    TEST_DESCRIPTION("Create a shader that requires relaxed block layout.");
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // The Relaxed Block Layout extension was promoted to core in 1.1.
    // Go ahead and check for it and turn it on in case a 1.0 device has it.
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring relaxed layout.
    // Without relaxed layout, we would expect a message like:
    // "Structure id 2 decorated as Block for variable in Uniform storage class
    // must follow standard uniform buffer layout rules: member 1 at offset 4 is not aligned to 16"
    const std::string spv_source = R"(
                  OpCapability Shader
                  OpMemoryModel Logical GLSL450
                  OpEntryPoint Vertex %main "main"
                  OpSource GLSL 450
                  OpMemberDecorate %S 0 Offset 0
                  OpMemberDecorate %S 1 Offset 4
                  OpDecorate %S Block
                  OpDecorate %B DescriptorSet 0
                  OpDecorate %B Binding 0
          %void = OpTypeVoid
             %3 = OpTypeFunction %void
         %float = OpTypeFloat 32
       %v3float = OpTypeVector %float 3
             %S = OpTypeStruct %float %v3float
%_ptr_Uniform_S = OpTypePointer Uniform %S
             %B = OpVariable %_ptr_Uniform_S Uniform
          %main = OpFunction %void None %3
             %5 = OpLabel
                  OpReturn
                  OpFunctionEnd
        )";

    std::vector<unsigned int> spv;
    VkShaderModuleCreateInfo module_create_info;
    VkShaderModule shader_module;
    module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    module_create_info.pNext = NULL;
    ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv);
    module_create_info.pCode = spv.data();
    module_create_info.codeSize = spv.size() * sizeof(unsigned int);
    module_create_info.flags = 0;

    m_errorMonitor->ExpectSuccess();
    VkResult err = vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module);
    m_errorMonitor->VerifyNotFound();
    if (err == VK_SUCCESS) {
        vkDestroyShaderModule(m_device->handle(), shader_module, NULL);
    }
}

TEST_F(VkPositiveLayerTest, ShaderScalarBlockLayout) {
    // This is a positive test, no errors expected
    // Verifies the ability to use scalar block layout rules with a shader that requires them
    TEST_DESCRIPTION("Create a shader that requires scalar block layout.");
    // Enable req'd extensions
    if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Check for the Scalar Block Layout extension and turn it on if it's available
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME);

    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");

    auto scalar_block_features = lvl_init_struct<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(NULL);
    scalar_block_features.scalarBlockLayout = VK_TRUE;
    auto query_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&scalar_block_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &query_features2);

    auto set_features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&scalar_block_features);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &set_features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring scalar layout.
// Without scalar layout, we would expect a message like: // "Structure id 2 decorated as Block for variable in Uniform storage class // must follow standard uniform buffer layout rules: member 1 at offset 4 is not aligned to 16" const std::string spv_source = R"( OpCapability Shader OpMemoryModel Logical GLSL450 OpEntryPoint Vertex %main "main" OpSource GLSL 450 OpMemberDecorate %S 0 Offset 0 OpMemberDecorate %S 1 Offset 4 OpMemberDecorate %S 2 Offset 8 OpDecorate %S Block OpDecorate %B DescriptorSet 0 OpDecorate %B Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v3float = OpTypeVector %float 3 %S = OpTypeStruct %float %float %v3float %_ptr_Uniform_S = OpTypePointer Uniform %S %B = OpVariable %_ptr_Uniform_S Uniform %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; std::vector spv; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv); module_create_info.pCode = spv.data(); module_create_info.codeSize = spv.size() * sizeof(unsigned int); module_create_info.flags = 0; m_errorMonitor->ExpectSuccess(); VkResult err = vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyShaderModule(m_device->handle(), shader_module, NULL); } } TEST_F(VkPositiveLayerTest, SpirvGroupDecorations) { TEST_DESCRIPTION("Test shader validation support for group decorations."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const std::string spv_source = R"( OpCapability Shader OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 430 OpName %main "main" OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId OpDecorate %_runtimearr_float ArrayStride 4 OpDecorate %4 BufferBlock OpDecorate %5 Offset 0 %4 = OpDecorationGroup %5 = OpDecorationGroup OpGroupDecorate %4 %_struct_6 %_struct_7 %_struct_8 %_struct_9 %_struct_10 %_struct_11 OpGroupMemberDecorate %5 %_struct_6 0 %_struct_7 0 %_struct_8 0 %_struct_9 0 %_struct_10 0 %_struct_11 0 OpDecorate %12 DescriptorSet 0 OpDecorate %13 DescriptorSet 0 OpDecorate %13 NonWritable OpDecorate %13 Restrict %14 = OpDecorationGroup %12 = OpDecorationGroup %13 = OpDecorationGroup OpGroupDecorate %12 %15 OpGroupDecorate %12 %15 OpGroupDecorate %12 %15 OpDecorate %15 DescriptorSet 0 OpDecorate %15 Binding 5 OpGroupDecorate %14 %16 OpDecorate %16 DescriptorSet 0 OpDecorate %16 Binding 0 OpGroupDecorate %12 %17 OpDecorate %17 Binding 1 OpGroupDecorate %13 %18 %19 OpDecorate %18 Binding 2 OpDecorate %19 Binding 3 OpGroupDecorate %14 %20 OpGroupDecorate %12 %20 OpGroupDecorate %13 %20 OpDecorate %20 Binding 4 %bool = OpTypeBool %void = OpTypeVoid %23 = OpTypeFunction %void %uint = OpTypeInt 32 0 %int = OpTypeInt 32 1 %float = OpTypeFloat 32 %v3uint = OpTypeVector %uint 3 %v3float = OpTypeVector %float 3 %_ptr_Input_v3uint = OpTypePointer Input %v3uint %_ptr_Uniform_int = OpTypePointer Uniform %int %_ptr_Uniform_float = OpTypePointer Uniform %float %_runtimearr_int = OpTypeRuntimeArray %int %_runtimearr_float = OpTypeRuntimeArray %float %gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input %int_0 = 
OpConstant %int 0 %_struct_6 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_6 = OpTypePointer Uniform %_struct_6 %15 = OpVariable %_ptr_Uniform__struct_6 Uniform %_struct_7 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_7 = OpTypePointer Uniform %_struct_7 %16 = OpVariable %_ptr_Uniform__struct_7 Uniform %_struct_8 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_8 = OpTypePointer Uniform %_struct_8 %17 = OpVariable %_ptr_Uniform__struct_8 Uniform %_struct_9 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_9 = OpTypePointer Uniform %_struct_9 %18 = OpVariable %_ptr_Uniform__struct_9 Uniform %_struct_10 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_10 = OpTypePointer Uniform %_struct_10 %19 = OpVariable %_ptr_Uniform__struct_10 Uniform %_struct_11 = OpTypeStruct %_runtimearr_float %_ptr_Uniform__struct_11 = OpTypePointer Uniform %_struct_11 %20 = OpVariable %_ptr_Uniform__struct_11 Uniform %main = OpFunction %void None %23 %40 = OpLabel %41 = OpLoad %v3uint %gl_GlobalInvocationID %42 = OpCompositeExtract %uint %41 0 %43 = OpAccessChain %_ptr_Uniform_float %16 %int_0 %42 %44 = OpAccessChain %_ptr_Uniform_float %17 %int_0 %42 %45 = OpAccessChain %_ptr_Uniform_float %18 %int_0 %42 %46 = OpAccessChain %_ptr_Uniform_float %19 %int_0 %42 %47 = OpAccessChain %_ptr_Uniform_float %20 %int_0 %42 %48 = OpAccessChain %_ptr_Uniform_float %15 %int_0 %42 %49 = OpLoad %float %43 %50 = OpLoad %float %44 %51 = OpLoad %float %45 %52 = OpLoad %float %46 %53 = OpLoad %float %47 %54 = OpFAdd %float %49 %50 %55 = OpFAdd %float %54 %51 %56 = OpFAdd %float %55 %52 %57 = OpFAdd %float %56 %53 OpStore %48 %57 OpReturn OpFunctionEnd )"; // CreateDescriptorSetLayout VkDescriptorSetLayoutBinding dslb[6] = {}; for (auto i = 0; i < 6; i++) { dslb[i].binding = i; dslb[i].descriptorCount = 1; dslb[i].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb[i].pImmutableSamplers = NULL; dslb[i].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_ALL; } VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.flags = 0; ds_layout_ci.bindingCount = 6; ds_layout_ci.pBindings = dslb; VkDescriptorSetLayout ds_layout = {}; vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); // CreatePipelineLayout VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.flags = 0; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); // Create DescriptorPool VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; ds_type_count.descriptorCount = 6; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool = VK_NULL_HANDLE; vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); // AllocateDescriptorSets VkDescriptorSetAllocateInfo ds_alloc_info = {}; ds_alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.descriptorPool = 
ds_pool; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet descriptorSet; vkAllocateDescriptorSets(m_device->device(), &ds_alloc_info, &descriptorSet); // CreateShaderModule std::vector spv; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; ASMtoSPV(SPV_ENV_VULKAN_1_0, 0, spv_source.data(), spv); module_create_info.pCode = spv.data(); module_create_info.codeSize = spv.size() * sizeof(unsigned int); module_create_info.flags = 0; vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); // CreateComputePipelines VkComputePipelineCreateInfo pipeline_info = {}; pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; pipeline_info.pNext = nullptr; pipeline_info.flags = 0; pipeline_info.layout = pipeline_layout; pipeline_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_info.basePipelineIndex = -1; pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; pipeline_info.stage.pNext = nullptr; pipeline_info.stage.flags = 0; pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; pipeline_info.stage.module = shader_module; pipeline_info.stage.pName = "main"; pipeline_info.stage.pSpecializationInfo = nullptr; VkPipeline cs_pipeline; m_errorMonitor->ExpectSuccess(); vkCreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &cs_pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(device(), cs_pipeline, nullptr); vkDestroyShaderModule(device(), shader_module, nullptr); vkDestroyDescriptorPool(device(), ds_pool, nullptr); vkDestroyPipelineLayout(device(), pipeline_layout, nullptr); vkDestroyDescriptorSetLayout(device(), ds_layout, nullptr); } TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension1of2) { // This is a positive test, no errors expected // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 1 of 2"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. 
\n", kSkipPrefix, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); // These tests require that the device support multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader using viewport array capability char const *vsSource = "#version 450\n" "#extension GL_ARB_shader_viewport_layer_array : enable\n" "void main() {\n" " gl_ViewportIndex = 1;\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); const VkPipelineLayoutObj pipe_layout(m_device, {}); m_errorMonitor->ExpectSuccess(); pipe.CreateVKPipeline(pipe_layout.handle(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension2of2) { // This is a positive test, no errors expected // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 2 of 2"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); // These tests require that the device support multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader using viewport array capability char const *vsSource = "#version 450\n" "#extension GL_ARB_shader_viewport_layer_array : enable\n" "void main() {\n" " gl_ViewportIndex = 1;\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); const VkPipelineLayoutObj pipe_layout(m_device, {}); m_errorMonitor->ExpectSuccess(); pipe.CreateVKPipeline(pipe_layout.handle(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvided) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader input which is not present in the outputs of the previous stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), 
renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvidedInBlock) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader input within an interace block, which is not present in the outputs " "of the previous stage."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchArraySize) { TEST_DESCRIPTION("Test that an error is produced for mismatched array sizes across the vertex->fragment shader interface"); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0.0: 'ptr to output arr[2] of float32' vs 'ptr to input arr[1] of float32'"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out float x[2];\n" "void main(){\n" " x[0] = 0; x[1] = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) in float x[1];\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(x[0]);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatch) { TEST_DESCRIPTION("Test that an error is produced for mismatched types across the vertex->fragment shader interface"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out int x;\n" "void main(){\n" " x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" /* VS writes int */ "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); 
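    // Expect cross-stage linking to fail: the vertex shader writes an int at location 0 while the fragment shader
    // reads a float there.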
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchInBlock) { TEST_DESCRIPTION( "Test that an error is produced for mismatched types across the vertex->fragment shader interface, when the variable is " "contained within an interface block"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=0) int x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" /* VS writes int */ "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByLocation) { TEST_DESCRIPTION( "Test that an error is produced for location mismatches across the vertex->fragment shader interface; This should manifest " "as a not-written/not-consumed pair, but flushes out broken walking of the interfaces"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.0 which is not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=1) float x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByComponent) { TEST_DESCRIPTION( "Test that an error is produced for component mismatches across the vertex->fragment shader interface. 
It's not enough to " "have the same set of locations in use; matching is defined in terms of spirv variables."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.1 which is not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=0, component=0) float x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0, component=1) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecision) { TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "layout(location=0) out mediump float x;\n" "void main() { gl_Position = vec4(0); x = 1.0; }\n"; char const *fsSource = "#version 450\n" "layout(location=0) in highp float x;\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(x); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecisionBlock) { TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "out block { layout(location=0) mediump float x; };\n" "void main() { gl_Position = vec4(0); x = 1.0; }\n"; char const *fsSource = "#version 450\n" "in block { layout(location=0) highp float x; };\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(x); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribNotConsumed) { TEST_DESCRIPTION("Test that a warning is produced for a vertex attribute which is not consumed by the vertex shader"); 
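    // A single R32_SFLOAT attribute is bound at location 0 below, but the vertex shader declares no inputs,
    // so only a performance warning is expected at pipeline creation.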
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribLocationMismatch) { TEST_DESCRIPTION( "Test that a warning is produced for a location mismatch on vertex attributes. This flushes out bad behavior in the " "interface walker"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=1) in float x;\n" "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetUnexpectedError("Vertex shader consumes input at location 1 but not provided"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribNotProvided) { TEST_DESCRIPTION("Test that an error is produced for a vertex shader input which is not provided by a vertex attribute"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Vertex shader consumes input at location 0 but not provided"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x;\n" /* not provided */ "void main(){\n" " gl_Position = x;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, 
VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribTypeMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a mismatch between the fundamental type (float/int/uint) of an attribute and the " "vertex shader input that consumes it"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0 does not match vertex shader input type"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in int x;\n" /* attrib provided float */ "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineDuplicateStage) { TEST_DESCRIPTION("Test that an error is produced for a pipeline containing multiple shaders for the same stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Multiple shaders provided for stage VK_SHADER_STAGE_VERTEX_BIT"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&vs); // intentionally duplicate vertex shader attachment pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineMissingEntrypoint) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "No entrypoint found named `foo`"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" 
"void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this, "foo"); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineDepthStencilRequired) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "pDepthStencilState is NULL when rasterization is enabled and subpass uses a depth/stencil attachment"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){ gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkAttachmentDescription attachments[] = { { 0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }, { 0, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, }, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}, }; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &refs[0], nullptr, &refs[1], 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attachments, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), rp); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, CreatePipelineTessPatchDecorationMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a variable output from the TCS without the patch decoration, but consumed in the TES " "with the decoration."); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "is per-vertex in tessellation control shader stage but per-patch in tessellation evaluation shader stage"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(location=0) out int x[];\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " 
gl_TessLevelInner[0] = 1;\n" " x[gl_InvocationID] = gl_InvocationID;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "layout(location=0) patch in int x;\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = x;\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineTessErrors) { TEST_DESCRIPTION("Test various errors when creating a graphics pipeline with tessellation stages active."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = 0;\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); { VkPipelineObj pipe(m_device); VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology pipe.SetInputAssembly(&iasci_bad); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass a tess control shader without a tess eval shader pipe.AddShader(&tcs); 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } { VkPipelineObj pipe(m_device); VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology pipe.SetInputAssembly(&iasci_bad); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass a tess eval shader without a tess control shader pipe.AddShader(&tes); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } { VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass patch topology without tessellation shaders m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-topology-00737"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.AddShader(&tcs); pipe.AddShader(&tes); // Pass a NULL pTessellationState (with active tessellation shader stages) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00731"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); // Pass an invalid pTessellationState (bad sType) VkPipelineTessellationStateCreateInfo tsci_bad = tsci; tsci_bad.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-sType-sType"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); // Pass out-of-range patchControlPoints tsci_bad = tsci; tsci_bad.patchControlPoints = 0; pipe.SetTessellation(&tsci); pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); tsci_bad.patchControlPoints = m_device->props.limits.maxTessellationPatchSize + 1; pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.SetTessellation(&tsci); // Pass an invalid primitive topology VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; pipe.SetInputAssembly(&iasci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.SetInputAssembly(&iasci); } } TEST_F(VkLayerTest, CreatePipelineAttribBindingConflict) { TEST_DESCRIPTION( "Test that an error is produced for a vertex attribute setup where multiple bindings provide the same location"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Duplicate vertex input binding descriptions for 
binding 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); /* Two binding descriptions for binding 0 */ VkVertexInputBindingDescription input_bindings[2]; memset(input_bindings, 0, sizeof(input_bindings)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" /* attrib provided float */ "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings, 2); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetUnexpectedError("VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-00616 "); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotWritten) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader which does not provide an output for one of the pipeline's color " "attachments"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "Attachment 0 not written by fragment shader"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "void main(){\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0, not written */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineFragmentOutputNotWrittenButMasked) { TEST_DESCRIPTION( "Test that no error is produced when the fragment shader fails to declare an output, but the corresponding attachment's " "write mask is 0."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "void main(){\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0, not written, but also masked */ pipe.AddDefaultColorAttachment(0); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, 
CreatePipelineFragmentOutputNotConsumed) { TEST_DESCRIPTION( "Test that a warning is produced for a fragment shader which provides a spurious output with no matching attachment"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "fragment shader writes to output location 1 with no matching attachment"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(location=1) out vec4 y;\n" /* no matching attachment for this */ "void main(){\n" " x = vec4(1);\n" " y = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0, not written */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); /* FS writes CB 1, but we don't configure it */ VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotConsumedButAlphaToCoverageEnabled) { TEST_DESCRIPTION( "Test that no warning is produced when writing to non-existing color attachment if alpha to coverage is enabled."); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); VkPipelineMultisampleStateCreateInfo ms_state_ci = {}; ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; ms_state_ci.alphaToCoverageEnable = VK_TRUE; pipe.SetMSAA(&ms_state_ci); ASSERT_NO_FATAL_FAILURE(InitRenderTarget(0u)); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentNoOutputLocation0ButAlphaToCoverageEnabled) { TEST_DESCRIPTION("Test that an error is produced when alpha to coverage is enabled but no output at location 0 is declared."); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "fragment shader doesn't declare alpha output at location 0 even though alpha to coverage is enabled."); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "void main(){\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); VkPipelineMultisampleStateCreateInfo ms_state_ci = {}; ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; 
    ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    ms_state_ci.alphaToCoverageEnable = VK_TRUE;
    pipe.SetMSAA(&ms_state_ci);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget(0u));
    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineFragmentNoAlphaLocation0ButAlphaToCoverageEnabled) {
    TEST_DESCRIPTION(
        "Test that an error is produced when alpha to coverage is enabled but output at location 0 doesn't have an alpha channel.");
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "fragment shader doesn't declare alpha output at location 0 even though alpha to coverage is enabled.");
    ASSERT_NO_FATAL_FAILURE(Init());
    char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n";
    char const *fsSource = "#version 450\n" "layout(location=0) out vec3 x;\n" "\n" "void main(){\n" " x = vec3(1);\n" "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    VkPipelineMultisampleStateCreateInfo ms_state_ci = {};
    ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    ms_state_ci.alphaToCoverageEnable = VK_TRUE;
    pipe.SetMSAA(&ms_state_ci);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget(0u));
    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineFragmentOutputTypeMismatch) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a mismatch between the fundamental type of a fragment shader output variable and the "
        "format of the corresponding attachment");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "does not match fragment shader output type");
    ASSERT_NO_FATAL_FAILURE(Init());
    char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n";
    char const *fsSource = "#version 450\n" "\n" "layout(location=0) out ivec4 x;\n" /* not UNORM */ "void main(){\n" " x = ivec4(1);\n" "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    /* set up CB 0; type is UNORM by default */
    pipe.AddDefaultColorAttachment();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxVertexOutputComponents) {
    TEST_DESCRIPTION(
        "Test that an error is produced when the number of output components from the vertex stage exceeds the device limit");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Vertex shader exceeds "
                                         "VkPhysicalDeviceLimits::maxVertexOutputComponents");
    ASSERT_NO_FATAL_FAILURE(Init());
    const uint32_t maxVsOutComp = m_device->props.limits.maxVertexOutputComponents;
    std::string vsSourceStr = "#version 450\n\n";
    const uint32_t
numVec4 = maxVsOutComp / 4; uint32_t location = 0; for (uint32_t i = 0; i < numVec4; i++) { vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec4 v" + std::to_string(i) + ";\n"; location += 1; } const uint32_t remainder = maxVsOutComp % 4; if (remainder != 0) { if (remainder == 1) { vsSourceStr += "layout(location=" + std::to_string(location) + ") out float" + " vn;\n"; } else { vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec" + std::to_string(remainder) + " vn;\n"; } location += 1; } vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec4 exceedLimit;\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxTessellationControlInputOutputComponents) { TEST_DESCRIPTION( "Test that errors are produced when the number of per-vertex input and/or output components to the tessellation control " "stage exceeds the device limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation control shader exceeds " "VkPhysicalDeviceLimits::maxTessellationControlPerVertexInputComponents"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation control shader exceeds " "VkPhysicalDeviceLimits::maxTessellationControlPerVertexOutputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures feat; vkGetPhysicalDeviceFeatures(gpu(), &feat); if (!feat.tessellationShader) { printf("%s tessellation shader stage(s) unsupported.\n", kSkipPrefix); return; } std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; // Tessellation control stage std::string tcsSourceStr = "#version 450\n" "\n"; // Input components const uint32_t maxTescInComp = m_device->props.limits.maxTessellationControlPerVertexInputComponents; const uint32_t numInVec4 = maxTescInComp / 4; uint32_t inLocation = 0; for (uint32_t i = 0; i < numInVec4; i++) { tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n"; inLocation += 1; } const uint32_t inRemainder = maxTescInComp % 4; if (inRemainder != 0) { if (inRemainder == 1) { tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n"; } else { tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n"; } inLocation += 1; } tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 exceedLimitIn[];\n\n"; // Output components const uint32_t maxTescOutComp = m_device->props.limits.maxTessellationControlPerVertexOutputComponents; const uint32_t numOutVec4 = maxTescOutComp / 4; uint32_t outLocation = 0; for (uint32_t i = 0; i < numOutVec4; i++) { tcsSourceStr += "layout(location=" + 
std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out[3];\n"; outLocation += 1; } const uint32_t outRemainder = maxTescOutComp % 4; if (outRemainder != 0) { if (outRemainder == 1) { tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut[3];\n"; } else { tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut[3];\n"; } outLocation += 1; } tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 exceedLimitOut[3];\n"; tcsSourceStr += "layout(vertices=3) out;\n"; // Finalize tcsSourceStr += "\n" "void main(){\n" " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n" "}\n"; std::string tesSourceStr = "#version 450\n" "\n" "layout(triangles) in;" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = {}; inputAssemblyInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; inputAssemblyInfo.pNext = NULL; inputAssemblyInfo.flags = 0; inputAssemblyInfo.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; inputAssemblyInfo.primitiveRestartEnable = VK_FALSE; pipe.SetInputAssembly(&inputAssemblyInfo); VkPipelineTessellationStateCreateInfo tessInfo = {}; tessInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tessInfo.pNext = NULL; tessInfo.flags = 0; tessInfo.patchControlPoints = 3; pipe.SetTessellation(&tessInfo); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxTessellationEvaluationInputOutputComponents) { TEST_DESCRIPTION( "Test that errors are produced when the number of input and/or output components to the tessellation evaluation stage " "exceeds the device limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation evaluation shader exceeds " "VkPhysicalDeviceLimits::maxTessellationEvaluationInputComponents"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Tessellation evaluation shader exceeds " "VkPhysicalDeviceLimits::maxTessellationEvaluationOutputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures feat; vkGetPhysicalDeviceFeatures(gpu(), &feat); if (!feat.tessellationShader) { printf("%s tessellation shader stage(s) unsupported.\n", kSkipPrefix); return; } std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string tcsSourceStr = "#version 450\n" "\n" "layout (vertices = 3) out;\n" "\n" "void main(){\n" " gl_out[gl_InvocationID].gl_Position = 
gl_in[gl_InvocationID].gl_Position;\n" "}\n"; // Tessellation evaluation stage std::string tesSourceStr = "#version 450\n" "\n" "layout (triangles) in;\n" "\n"; // Input components const uint32_t maxTeseInComp = m_device->props.limits.maxTessellationEvaluationInputComponents; const uint32_t numInVec4 = maxTeseInComp / 4; uint32_t inLocation = 0; for (uint32_t i = 0; i < numInVec4; i++) { tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n"; inLocation += 1; } const uint32_t inRemainder = maxTeseInComp % 4; if (inRemainder != 0) { if (inRemainder == 1) { tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n"; } else { tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n"; } inLocation += 1; } tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 exceedLimitIn[];\n\n"; // Output components const uint32_t maxTeseOutComp = m_device->props.limits.maxTessellationEvaluationOutputComponents; const uint32_t numOutVec4 = maxTeseOutComp / 4; uint32_t outLocation = 0; for (uint32_t i = 0; i < numOutVec4; i++) { tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out;\n"; outLocation += 1; } const uint32_t outRemainder = maxTeseOutComp % 4; if (outRemainder != 0) { if (outRemainder == 1) { tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut;\n"; } else { tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut;\n"; } outLocation += 1; } tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 exceedLimitOut;\n"; // Finalize tesSourceStr += "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSourceStr.c_str(), VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = {}; inputAssemblyInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; inputAssemblyInfo.pNext = NULL; inputAssemblyInfo.flags = 0; inputAssemblyInfo.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; inputAssemblyInfo.primitiveRestartEnable = VK_FALSE; pipe.SetInputAssembly(&inputAssemblyInfo); VkPipelineTessellationStateCreateInfo tessInfo = {}; tessInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tessInfo.pNext = NULL; tessInfo.flags = 0; tessInfo.patchControlPoints = 3; pipe.SetTessellation(&tessInfo); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxGeometryInputOutputComponents) { TEST_DESCRIPTION( "Test that 
errors are produced when the number of input and/or output components to the geometry stage exceeds the device " "limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Geometry shader exceeds " "VkPhysicalDeviceLimits::maxGeometryInputComponents"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Geometry shader exceeds " "VkPhysicalDeviceLimits::maxGeometryOutputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures feat; vkGetPhysicalDeviceFeatures(gpu(), &feat); if (!feat.geometryShader) { printf("%s geometry shader stage unsupported.\n", kSkipPrefix); return; } std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; std::string gsSourceStr = "#version 450\n" "\n" "layout(triangles) in;\n" "layout(invocations=1) in;\n"; // Input components const uint32_t maxGeomInComp = m_device->props.limits.maxGeometryInputComponents; const uint32_t numInVec4 = maxGeomInComp / 4; uint32_t inLocation = 0; for (uint32_t i = 0; i < numInVec4; i++) { gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n"; inLocation += 1; } const uint32_t inRemainder = maxGeomInComp % 4; if (inRemainder != 0) { if (inRemainder == 1) { gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n"; } else { gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n"; } inLocation += 1; } gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 exceedLimitIn[];\n\n"; // Output components const uint32_t maxGeomOutComp = m_device->props.limits.maxGeometryOutputComponents; const uint32_t numOutVec4 = maxGeomOutComp / 4; uint32_t outLocation = 0; for (uint32_t i = 0; i < numOutVec4; i++) { gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out;\n"; outLocation += 1; } const uint32_t outRemainder = maxGeomOutComp % 4; if (outRemainder != 0) { if (outRemainder == 1) { gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut;\n"; } else { gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut;\n"; } outLocation += 1; } gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 exceedLimitOut;\n"; // Finalize gsSourceStr += "layout(triangle_strip, max_vertices=3) out;\n" "\n" "void main(){\n" " exceedLimitOut = vec4(1);\n" "}\n"; std::string fsSourceStr = "#version 450\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSourceStr.c_str(), VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&gs); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineExceedMaxFragmentInputComponents) { TEST_DESCRIPTION( "Test that an error is produced when the number of input components from 
the fragment stage exceeds the device limit"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Fragment shader exceeds " "VkPhysicalDeviceLimits::maxFragmentInputComponents"); ASSERT_NO_FATAL_FAILURE(Init()); std::string vsSourceStr = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; const uint32_t maxFsInComp = m_device->props.limits.maxFragmentInputComponents; std::string fsSourceStr = "#version 450\n\n"; const uint32_t numVec4 = maxFsInComp / 4; uint32_t location = 0; for (uint32_t i = 0; i < numVec4; i++) { fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec4 v" + std::to_string(i) + ";\n"; location += 1; } const uint32_t remainder = maxFsInComp % 4; if (remainder != 0) { if (remainder == 1) { fsSourceStr += "layout(location=" + std::to_string(location) + ") in float" + " vn;\n"; } else { fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec" + std::to_string(remainder) + " vn;\n"; } location += 1; } fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec4 exceedLimit;\n" "\n" "layout(location=0) out vec4 color;" "\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSourceStr.c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSourceStr.c_str(), VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); // Set up CB 0; type is UNORM by default pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineUniformBlockNotProvided) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming a uniform block which has no corresponding binding in the pipeline " "layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in pipeline layout"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelinePushConstantsNotInLayout) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming push constants which are not provided in the pipeline layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in layout"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "layout(push_constant, std430) uniform foo { float x; } consts;\n" "void main(){\n" " gl_Position = vec4(consts.x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "void main(){\n" 
" x = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); /* should have generated an error -- no push constant ranges provided! */ m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissing) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass " "description"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "consumes input attachment index 0 but not provided in subpass"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); // error here. 
pipe.CreateVKPipeline(pl.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentTypeMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment with a format having a different fundamental " "type"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "input attachment 0 format of VK_FORMAT_R8G8B8A8_UINT does not match"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); VkAttachmentDescription descs[2] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference color = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }; VkAttachmentReference input = { 1, VK_IMAGE_LAYOUT_GENERAL, }; VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // error here. 
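    // Attachment 1, referenced as the subpass input, is VK_FORMAT_R8G8B8A8_UINT while the shader declares a float subpassInput.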
pipe.CreateVKPipeline(pl.handle(), rp); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissingArray) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass " "description -- array case"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "consumes input attachment index 0 but not provided in subpass"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput xs[1];\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(xs[0]);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 2, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); // error here. pipe.CreateVKPipeline(pl.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateComputePipelineMissingDescriptor) { TEST_DESCRIPTION( "Test that an error is produced for a compute pipeline consuming a descriptor which is not provided in the pipeline " "layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Shader uses descriptor slot 0.0"); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main(){\n" " x = vec4(1);\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, descriptorSet.GetPipelineLayout(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkLayerTest, CreateComputePipelineDescriptorTypeMismatch) { TEST_DESCRIPTION("Test that an error is produced for a pipeline consuming a descriptor-backed resource of a mismatched type"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "but descriptor of type VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {binding}); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main() {\n" " x.x = 1.0f;\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = 
{VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkLayerTest, DrawTimeImageViewTypeMismatchWithPipeline) { TEST_DESCRIPTION( "Test that an error is produced when an image view type does not match the dimensionality declared in the shader"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires an image view of type VK_IMAGE_VIEW_TYPE_3D"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler3D s;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = texture(s, vec3(0));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkTextureObj texture(m_device, nullptr); VkSamplerObj sampler(m_device); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendSamplerTexture(&sampler, &texture); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptorSet); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // error produced here. 
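    // The descriptor set binds the framework's default 2D texture, but the shader declares sampler3D, so the mismatch is reported at draw time.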
    vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, DrawTimeImageMultisampleMismatchWithPipeline) {
    TEST_DESCRIPTION(
        "Test that an error is produced when multisampled images are consumed via single-sample image types in the shader, or "
        "vice versa.");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires bound image to have multiple samples");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n";
    char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2DMS s;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = texelFetch(s, ivec2(0), 0);\n" "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    VkTextureObj texture(m_device, nullptr);  // THIS LINE CAUSES CRASH ON MALI
    VkSamplerObj sampler(m_device);
    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendSamplerTexture(&sampler, &texture);
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
    VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
    ASSERT_VK_SUCCESS(err);
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    m_commandBuffer->BindDescriptorSet(descriptorSet);
    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    VkRect2D scissor = {{0, 0}, {16, 16}};
    vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
    // error produced here.
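    // The bound texture is single-sample, while the shader declares sampler2DMS, so the mismatch is reported at draw time.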
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DrawTimeImageComponentTypeMismatchWithPipeline) { TEST_DESCRIPTION( "Test that an error is produced when the component type of an imageview disagrees with the type in the shader."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "SINT component type, but bound descriptor"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform isampler2D s;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = texelFetch(s, ivec2(0), 0);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkTextureObj texture(m_device, nullptr); // UNORM texture by default, incompatible with isampler2D VkSamplerObj sampler(m_device); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendSamplerTexture(&sampler, &texture); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptorSet); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // error produced here. vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, AttachmentDescriptionUndefinedFormat) { TEST_DESCRIPTION("Create a render pass with an attachment description format set to VK_FORMAT_UNDEFINED"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "format is VK_FORMAT_UNDEFINED"); VkAttachmentReference color_attach = {}; color_attach.layout = VK_IMAGE_LAYOUT_GENERAL; color_attach.attachment = 0; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_UNDEFINED; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult result = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); if (result == VK_SUCCESS) { vkDestroyRenderPass(m_device->device(), rp, NULL); } } TEST_F(VkLayerTest, CreateImageViewNoMemoryBoundToImage) { VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. 
Memory should be bound by calling vkBindImageMemory()."); ASSERT_NO_FATAL_FAILURE(Init()); // Create an image and try to create a view with no memory backing the image VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); // If last error is success, it still created the view, so delete it. if (err == VK_SUCCESS) { vkDestroyImageView(m_device->device(), view, NULL); } } TEST_F(VkLayerTest, InvalidImageViewAspect) { TEST_DESCRIPTION("Create an image and try to create a view with an invalid aspectMask"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); ASSERT_NO_FATAL_FAILURE(Init()); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; VkImageObj image(m_device); image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_LINEAR, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.layerCount = 1; // Cause an error by setting an invalid image aspect image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; VkImageView view; vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ExerciseGetImageSubresourceLayout) { TEST_DESCRIPTION("Test vkGetImageSubresourceLayout() valid usages"); ASSERT_NO_FATAL_FAILURE(Init()); VkSubresourceLayout subres_layout = {}; // VU 00732: image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR { const VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL; // ERROR: violates VU 00732 VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, tiling); 
ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 0; subres.arrayLayer = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-image-00996"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } // VU 00733: The aspectMask member of pSubresource must only have a single bit set { VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT); ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_METADATA_BIT; // ERROR: triggers VU 00733 subres.mipLevel = 0; subres.arrayLayer = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-aspectMask-00997"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } // 00739 mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created { VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT); ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 1; // ERROR: triggers VU 00739 subres.arrayLayer = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-mipLevel-01716"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } // 00740 arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created { VkImageObj img(m_device); img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT); ASSERT_TRUE(img.initialized()); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 0; subres.arrayLayer = 1; // ERROR: triggers VU 00740 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717"); vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, CopyImageLayerCountMismatch) { TEST_DESCRIPTION( "Try to copy between images with the source subresource having a different layerCount than the destination subresource"); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images to copy between VkImageObj src_image_obj(m_device); VkImageObj dst_image_obj(m_device); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 4; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = 0; src_image_obj.init(&image_create_info); ASSERT_TRUE(src_image_obj.initialized()); image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; 
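    // The destination image reuses the same create info as the source; only the usage flag
    // was switched to VK_IMAGE_USAGE_TRANSFER_DST_BIT above.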
    dst_image_obj.init(&image_create_info);
    ASSERT_TRUE(dst_image_obj.initialized());

    m_commandBuffer->begin();
    VkImageCopy copyRegion;
    copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.srcSubresource.mipLevel = 0;
    copyRegion.srcSubresource.baseArrayLayer = 0;
    copyRegion.srcSubresource.layerCount = 1;
    copyRegion.srcOffset.x = 0;
    copyRegion.srcOffset.y = 0;
    copyRegion.srcOffset.z = 0;
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.dstSubresource.mipLevel = 0;
    copyRegion.dstSubresource.baseArrayLayer = 0;
    // Introduce failure by forcing the dst layerCount to differ from src
    copyRegion.dstSubresource.layerCount = 3;
    copyRegion.dstOffset.x = 0;
    copyRegion.dstOffset.y = 0;
    copyRegion.dstOffset.z = 0;
    copyRegion.extent.width = 1;
    copyRegion.extent.height = 1;
    copyRegion.extent.depth = 1;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-extent-00140");
    m_commandBuffer->CopyImage(src_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copyRegion);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, ImageLayerUnsupportedFormat) {
    TEST_DESCRIPTION("Creating images with unsupported formats");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Create image with unsupported format - Expect FORMAT_UNSUPPORTED
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_UNDEFINED;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00943");

    VkImage image;
    vkCreateImage(m_device->handle(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreateImageViewFormatMismatchUnrelated) {
    TEST_DESCRIPTION("Create an image with a color format, then try to create a depth view of it");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Load required functions
    PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT =
        (PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT");
    PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT =
        (PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(),
                                                                                  "vkGetOriginalPhysicalDeviceFormatPropertiesEXT");
    if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
        printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
        return;
    }

    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s Couldn't find depth stencil image format.\n", kSkipPrefix);
        return;
    }

    VkFormatProperties formatProps;
    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, &formatProps);
    formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
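    // The device_profile_api layer is used to advertise color-attachment support for the chosen
    // depth/stencil format, presumably so that creation below is not rejected for missing format
    // features before the intended "identical formats" error can fire.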
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, formatProps); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView imgView; VkImageViewCreateInfo imgViewInfo = {}; imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; imgViewInfo.image = image.handle(); imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; imgViewInfo.format = depth_format; imgViewInfo.subresourceRange.layerCount = 1; imgViewInfo.subresourceRange.baseMipLevel = 0; imgViewInfo.subresourceRange.levelCount = 1; imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Can't use depth format for view into color image - Expect INVALID_FORMAT m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation."); vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateImageViewNoMutableFormatBit) { TEST_DESCRIPTION("Create an image view with a different format, when the image does not have MUTABLE_FORMAT bit"); if (!EnableDeviceProfileLayer()) { printf("%s Couldn't enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Required extensions are not present.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkFormatProperties formatProps; fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, &formatProps); formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, formatProps); VkImageView imgView; VkImageViewCreateInfo imgViewInfo = {}; imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; imgViewInfo.image = image.handle(); imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; imgViewInfo.format = VK_FORMAT_B8G8R8A8_UINT; imgViewInfo.subresourceRange.layerCount = 1; imgViewInfo.subresourceRange.baseMipLevel = 0; imgViewInfo.subresourceRange.levelCount = 1; imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Same compatibility class but no MUTABLE_FORMAT bit - Expect // VIEW_CREATE_ERROR m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01019"); vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateImageViewDifferentClass) { TEST_DESCRIPTION("Passing bad parameters to CreateImageView"); ASSERT_NO_FATAL_FAILURE(Init()); if (!(m_device->format_properties(VK_FORMAT_R8_UINT).optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) { printf("%s Device does not support R8_UINT as color attachment; skipped", kSkipPrefix); return; } VkImageCreateInfo mutImgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, 
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, VK_IMAGE_TYPE_2D, VK_FORMAT_R8_UINT, {128, 128, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj mutImage(m_device); mutImage.init(&mutImgInfo); ASSERT_TRUE(mutImage.initialized()); VkImageView imgView; VkImageViewCreateInfo imgViewInfo = {}; imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; imgViewInfo.format = VK_FORMAT_B8G8R8A8_UNORM; imgViewInfo.subresourceRange.layerCount = 1; imgViewInfo.subresourceRange.baseMipLevel = 0; imgViewInfo.subresourceRange.levelCount = 1; imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; imgViewInfo.image = mutImage.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01018"); vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, MultiplaneIncompatibleViewFormat) { TEST_DESCRIPTION("Postive/negative tests of multiplane imageview format compatibility"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify format VkFormatFeatureFlags features = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; } VkImageObj image_obj(m_device); image_obj.init(&ci); ASSERT_TRUE(image_obj.initialized()); VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image_obj.image(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8_SNORM; // Compat is VK_FORMAT_R8_UNORM ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT; // Incompatible format error VkImageView imageView = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01586"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed imageView = VK_NULL_HANDLE; // Correct format succeeds ivci.format = VK_FORMAT_R8_UNORM; m_errorMonitor->ExpectSuccess(); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed imageView = VK_NULL_HANDLE; // Try a multiplane imageview ivci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->ExpectSuccess(); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed } TEST_F(VkLayerTest, CreateImageViewInvalidSubresourceRange) { TEST_DESCRIPTION("Passing bad image subrange to CreateImageView"); ASSERT_NO_FATAL_FAILURE(Init()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); VkImageView img_view; VkImageViewCreateInfo img_view_info_template = {}; img_view_info_template.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; img_view_info_template.image = image.handle(); img_view_info_template.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY; img_view_info_template.format = image.format(); // subresourceRange to be filled later for the purposes of this test img_view_info_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_view_info_template.subresourceRange.baseMipLevel = 0; img_view_info_template.subresourceRange.levelCount = 0; img_view_info_template.subresourceRange.baseArrayLayer = 0; img_view_info_template.subresourceRange.layerCount = 0; // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; 
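        // Aggregate-initializer field order for VkImageSubresourceRange is
        // {aspectMask, baseMipLevel, levelCount, baseArrayLayer, layerCount}; here baseMipLevel
        // is 1 even though the image was created with a single mip level.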
VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // These tests rely on having the Maintenance1 extension not being enabled, and are invalid on all but version 1.0 if (m_device->props.apiVersion < VK_API_VERSION_1_1) { // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01480"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01480"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01719"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01719"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01719"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } } } TEST_F(VkLayerTest, CompressedImageMipCopyTests) { TEST_DESCRIPTION("Image/Buffer copies for higher mip levels"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures device_features = {}; 
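    // The test below picks whichever block-compressed family the device exposes (BC3, ETC2, or
    // ASTC 4x4). All three use 4x4-texel blocks of 16 bytes, which the buffer-size arithmetic
    // in this test relies on.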
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));

    VkFormat compressed_format = VK_FORMAT_UNDEFINED;
    if (device_features.textureCompressionBC) {
        compressed_format = VK_FORMAT_BC3_SRGB_BLOCK;
    } else if (device_features.textureCompressionETC2) {
        compressed_format = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
    } else if (device_features.textureCompressionASTC_LDR) {
        compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
    } else {
        printf("%s No compressed formats supported - CompressedImageMipCopyTests skipped.\n", kSkipPrefix);
        return;
    }

    VkImageCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.format = compressed_format;
    ci.extent = {32, 32, 1};
    ci.mipLevels = 6;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.queueFamilyIndexCount = 0;
    ci.pQueueFamilyIndices = NULL;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkImageObj image(m_device);
    image.init(&ci);
    ASSERT_TRUE(image.initialized());

    VkImageObj odd_image(m_device);
    ci.extent = {31, 32, 1};  // Mips are [31,32] [15,16] [7,8] [3,4], [1,2] [1,1]
    odd_image.init(&ci);
    ASSERT_TRUE(odd_image.initialized());

    // Allocate buffers
    VkMemoryPropertyFlags reqs = 0;
    VkBufferObj buffer_1024, buffer_64, buffer_16, buffer_8;
    buffer_1024.init_as_src_and_dst(*m_device, 1024, reqs);
    buffer_64.init_as_src_and_dst(*m_device, 64, reqs);
    buffer_16.init_as_src_and_dst(*m_device, 16, reqs);
    buffer_8.init_as_src_and_dst(*m_device, 8, reqs);

    VkBufferImageCopy region = {};
    region.bufferRowLength = 0;
    region.bufferImageHeight = 0;
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    region.imageSubresource.layerCount = 1;
    region.imageOffset = {0, 0, 0};
    region.bufferOffset = 0;

    // start recording
    m_commandBuffer->begin();

    // Mip level copies that work - 5 levels
    m_errorMonitor->ExpectSuccess();

    // Mip 0 should fit in 1k buffer - 1k texels @ 1b each
    region.imageExtent = {32, 32, 1};
    region.imageSubresource.mipLevel = 0;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_1024.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_1024.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);

    // Mip 2 should fit in 64b buffer - 64 texels @ 1b each
    region.imageExtent = {8, 8, 1};
    region.imageSubresource.mipLevel = 2;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);

    // Mip 3 should fit in 16b buffer - 16 texels @ 1b each
    region.imageExtent = {4, 4, 1};
    region.imageSubresource.mipLevel = 3;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);

    // Mip 4&5 should fit in 16b buffer with no complaint - 4 & 1 texels @ 1b each
    region.imageExtent = {2, 2, 1};
    region.imageSubresource.mipLevel = 4;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);

    region.imageExtent = {1, 1, 1};
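    // Mip 5 is a single texel, but it still occupies one full 16-byte compressed block, so the
    // 16-byte buffer is exactly large enough.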
    region.imageSubresource.mipLevel = 5;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyNotFound();

    // Buffer must accommodate a full compressed block, regardless of texel count
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_8.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_8.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Copy width < compressed block size, but not the full mip width
    region.imageExtent = {1, 2, 1};
    region.imageSubresource.mipLevel = 4;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageExtent-00207");  // width not a multiple of compressed block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageExtent-00207");  // width not a multiple of compressed block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Copy height < compressed block size but not the full mip height
    region.imageExtent = {2, 1, 1};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageExtent-00208");  // height not a multiple of compressed block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageExtent-00208");  // height not a multiple of compressed block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Offsets must be multiple of compressed block size
    region.imageOffset = {1, 1, 0};
    region.imageExtent = {1, 1, 1};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageOffset-00205");  // imageOffset not a multiple of block size
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageOffset-00205");  // imageOffset not a multiple of block size
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Offset + extent width = mip width - should succeed
    region.imageOffset = {4, 4, 0};
    region.imageExtent = {3, 4, 1};
    region.imageSubresource.mipLevel = 2;
    m_errorMonitor->ExpectSuccess();
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyNotFound();

    // Offset + extent width > mip width, but still within the final compressed block - should succeed
    region.imageExtent = {4, 4, 1};
    m_errorMonitor->ExpectSuccess();
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyNotFound();

    // Offset + extent width < mip width and not a multiple of block width - should fail
    region.imageExtent = {3, 3, 1};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageExtent-00208");  // offset+extent not a multiple of block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageExtent-00208");  // offset+extent not a multiple of block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, ImageBufferCopyTests) {
    TEST_DESCRIPTION("Image to buffer and buffer to image tests");
    ASSERT_NO_FATAL_FAILURE(Init());

    // Bail if any dimension of transfer granularity is 0.
    auto index = m_device->graphics_queue_node_index_;
    auto queue_family_properties = m_device->phy().queue_properties();
    if ((queue_family_properties[index].minImageTransferGranularity.depth == 0) ||
        (queue_family_properties[index].minImageTransferGranularity.width == 0) ||
        (queue_family_properties[index].minImageTransferGranularity.height == 0)) {
        printf("%s Subresource copies are disallowed when xfer granularity (x|y|z) is 0. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image_64k(m_device); // 128^2 texels, 64k VkImageObj image_16k(m_device); // 64^2 texels, 16k VkImageObj image_16k_depth(m_device); // 64^2 texels, depth, 16k VkImageObj ds_image_4D_1S(m_device); // 256^2 texels, 512kb (256k depth, 64k stencil, 192k pack) VkImageObj ds_image_3D_1S(m_device); // 256^2 texels, 256kb (192k depth, 64k stencil) VkImageObj ds_image_2D(m_device); // 256^2 texels, 128k (128k depth) VkImageObj ds_image_1S(m_device); // 256^2 texels, 64k (64k stencil) image_64k.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UINT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_16k.Init(64, 64, 1, VK_FORMAT_R8G8B8A8_UINT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_64k.initialized()); ASSERT_TRUE(image_16k.initialized()); // Verify all needed Depth/Stencil formats are supported bool missing_ds_support = false; VkFormatProperties props = {0, 0, 0}; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D24_UNORM_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; if (!missing_ds_support) { image_16k_depth.Init(64, 64, 1, VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_16k_depth.initialized()); ds_image_4D_1S.Init( 256, 256, 1, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_4D_1S.initialized()); ds_image_3D_1S.Init( 256, 256, 1, VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_3D_1S.initialized()); ds_image_2D.Init( 256, 256, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | 
                        VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
        ASSERT_TRUE(ds_image_2D.initialized());
        ds_image_1S.Init(256, 256, 1, VK_FORMAT_S8_UINT,
                         VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
                         VK_IMAGE_TILING_OPTIMAL, 0);
        ASSERT_TRUE(ds_image_1S.initialized());
    }

    // Allocate buffers
    VkBufferObj buffer_256k, buffer_128k, buffer_64k, buffer_16k;
    VkMemoryPropertyFlags reqs = 0;
    buffer_256k.init_as_src_and_dst(*m_device, 262144, reqs);  // 256k
    buffer_128k.init_as_src_and_dst(*m_device, 131072, reqs);  // 128k
    buffer_64k.init_as_src_and_dst(*m_device, 65536, reqs);    // 64k
    buffer_16k.init_as_src_and_dst(*m_device, 16384, reqs);    // 16k

    VkBufferImageCopy region = {};
    region.bufferRowLength = 0;
    region.bufferImageHeight = 0;
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    region.imageSubresource.layerCount = 1;
    region.imageOffset = {0, 0, 0};
    region.imageExtent = {64, 64, 1};
    region.bufferOffset = 0;

    // attempt copies before putting command buffer in recording state
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording");
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    // start recording
    m_commandBuffer->begin();

    // successful copies
    m_errorMonitor->ExpectSuccess();
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    region.imageOffset.x = 16;  // 16k copy, offset requires larger image
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    region.imageExtent.height = 78;  // > 16k copy requires larger buffer & image
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    region.imageOffset.x = 0;
    region.imageExtent.height = 64;
    region.bufferOffset = 256;  // 16k copy with buffer offset, requires larger buffer
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region);
    m_errorMonitor->VerifyNotFound();

    // image/buffer too small (extent too large) on copy to image
    region.imageExtent = {65, 64, 1};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171");  // buffer too small
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172");  // image too small
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // image/buffer too small (offset) on copy to image
    region.imageExtent = {64, 64, 1};
    region.imageOffset = {0, 4, 0};
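    // Note: bufferOffset is still 256 from the successful copy above, so the 16k buffer comes up
    // short for the first call below; the y-offset of 4 likewise pushes the region past the
    // 64x64 image_16k for the second call.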
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171");  // buffer too small
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172");  // image too small
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // image/buffer too small on copy to buffer
    region.imageExtent = {64, 64, 1};
    region.imageOffset = {0, 0, 0};
    region.bufferOffset = 4;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");  // buffer too small
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    region.imageExtent = {64, 65, 1};
    region.bufferOffset = 0;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00182");  // image too small
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    // buffer size OK but rowlength causes loose packing
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");
    region.imageExtent = {64, 64, 1};
    region.bufferRowLength = 68;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    // An extent with zero area should produce a warning, but no error
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT, "} has zero area");
    region.imageExtent.width = 0;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    // aspect bits
    region.imageExtent = {64, 64, 1};
    region.bufferRowLength = 0;
    region.bufferImageHeight = 0;
    if (!missing_ds_support) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkBufferImageCopy-aspectMask-00212");  // more than 1 aspect bit set
        region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkBufferImageCopy-aspectMask-00211");  // different mis-matched aspect
        region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();
    }
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-aspectMask-00211");  // mis-matched aspect
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // Out-of-range mip levels should fail
    region.imageSubresource.mipLevel = image_16k.create_info().mipLevels + 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703");
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-vkCmdCopyImageToBuffer-pRegions-00182");  // unavoidable "region exceeds image bounds" for non-existent mip
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01701");
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-vkCmdCopyBufferToImage-pRegions-00172");  // unavoidable "region exceeds image bounds" for non-existent mip
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();
    region.imageSubresource.mipLevel = 0;

    // Out-of-range array layers should fail
    region.imageSubresource.baseArrayLayer = image_16k.create_info().arrayLayers;
    region.imageSubresource.layerCount = 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704");
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01702");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();
    region.imageSubresource.baseArrayLayer = 0;

    // Layout mismatch should fail
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189");
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_16k.handle(), 1,
                           &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                           &region);
    m_errorMonitor->VerifyFound();

    // Test Depth/Stencil copies
    if (missing_ds_support) {
        printf("%s Depth / Stencil formats unsupported - skipping D/S tests.\n", kSkipPrefix);
    } else {
        VkBufferImageCopy ds_region = {};
        ds_region.bufferOffset = 0;
        ds_region.bufferRowLength = 0;
        ds_region.bufferImageHeight = 0;
        ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
        ds_region.imageSubresource.mipLevel = 0;
        ds_region.imageSubresource.baseArrayLayer = 0;
        ds_region.imageSubresource.layerCount = 1;
        ds_region.imageOffset = {0, 0, 0};
        ds_region.imageExtent = {256, 256, 1};

        // Depth copies that should succeed
        m_errorMonitor->ExpectSuccess();  // Extract 4b depth per texel, pack into 256k buffer
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                               buffer_256k.handle(), 1, &ds_region);
        m_errorMonitor->VerifyNotFound();

        m_errorMonitor->ExpectSuccess();  // Extract 3b depth per texel, pack (loose) into 256k buffer
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                               buffer_256k.handle(), 1, &ds_region);
        m_errorMonitor->VerifyNotFound();

        m_errorMonitor->ExpectSuccess();  // Copy
2b depth per texel, into 128k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_128k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); // Depth copies that should fail ds_region.bufferOffset = 4; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 4b depth per texel, pack into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 3b depth per texel, pack (loose) into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Copy 2b depth per texel, into 128k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_128k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); // Stencil copies that should succeed ds_region.bufferOffset = 0; ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; m_errorMonitor->ExpectSuccess(); // Extract 1b stencil per texel, pack into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); // Extract 1b stencil per texel, pack into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); // Copy 1b depth per texel, into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); // Stencil copies that should fail m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 1b stencil per texel, pack into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_16k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 1b stencil per texel, pack into 64k buffer ds_region.bufferRowLength = 260; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); ds_region.bufferRowLength = 0; ds_region.bufferOffset = 4; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Copy 1b depth per texel, into 64k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region); m_errorMonitor->VerifyFound(); } // Test compressed formats, if supported VkPhysicalDeviceFeatures device_features = {}; 
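    // image_16k_4x4comp below is 128x128 texels = 32x32 blocks at 16 bytes per 4x4 block,
    // i.e. exactly 16384 bytes, which is why the first compressed copy "just fits" the 16k buffer.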
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    if (!(device_features.textureCompressionBC || device_features.textureCompressionETC2 ||
          device_features.textureCompressionASTC_LDR)) {
        printf("%s No compressed formats supported - block compression tests skipped.\n", kSkipPrefix);
    } else {
        VkImageObj image_16k_4x4comp(m_device);   // 128^2 texels as 32^2 compressed (4x4) blocks, 16k
        VkImageObj image_NPOT_4x4comp(m_device);  // 130^2 texels as 33^2 compressed (4x4) blocks
        if (device_features.textureCompressionBC) {
            image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
            image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
        } else if (device_features.textureCompressionETC2) {
            image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                   VK_IMAGE_TILING_OPTIMAL, 0);
            image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                    VK_IMAGE_TILING_OPTIMAL, 0);
        } else {
            image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL,
                                   0);
            image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL,
                                    0);
        }
        ASSERT_TRUE(image_16k_4x4comp.initialized());

        // Just fits
        m_errorMonitor->ExpectSuccess();
        region.imageExtent = {128, 128, 1};
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyNotFound();

        // with offset, too big for buffer
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");
        region.bufferOffset = 16;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();
        region.bufferOffset = 0;

        // extents that are not a multiple of compressed block size
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkBufferImageCopy-imageExtent-00207");  // extent width not a multiple of block size
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
        region.imageExtent.width = 66;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();
        region.imageExtent.width = 128;

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkBufferImageCopy-imageExtent-00208");  // extent height not a multiple of block size
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
        region.imageExtent.height = 2;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();
        region.imageExtent.height = 128;

        // TODO: All available compressed formats are 2D, with block depth of 1. Unable to provoke VU_01277.
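        // image_NPOT_4x4comp is 130 texels on a side: 32 full 4x4 blocks plus a 2-texel partial block,
        // so offsets of 64 and 128 combined with extents of 66 and 2 land exactly on the image edge.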
        // non-multiple extents are allowed if at the far edge of a non-block-multiple image - these should pass
        m_errorMonitor->ExpectSuccess();
        region.imageExtent.width = 66;
        region.imageOffset.x = 64;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        region.imageExtent.width = 16;
        region.imageOffset.x = 0;
        region.imageExtent.height = 2;
        region.imageOffset.y = 128;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyNotFound();
        region.imageOffset = {0, 0, 0};

        // buffer offset must be a multiple of texel block size (16)
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00206");
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193");
        region.imageExtent = {64, 64, 1};
        region.bufferOffset = 24;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();

        // rowlength not a multiple of block width (4)
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00203");
        region.bufferOffset = 0;
        region.bufferRowLength = 130;
        region.bufferImageHeight = 0;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();

        // imageheight not a multiple of block height (4)
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00204");
        region.bufferRowLength = 0;
        region.bufferImageHeight = 130;
        vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1,
                               &region);
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, MiscImageLayerTests) {
    TEST_DESCRIPTION("Image-related tests that don't belong elsewhere");

    ASSERT_NO_FATAL_FAILURE(Init());

    // TODO: Ideally we should check if a format is supported, before using it.
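    // VK_FORMAT_R16G16B16A16_UINT is 8 bytes per texel (the "64bpp" below), which is why the
    // source buffer is sized 128 * 128 * 8 and why a bufferOffset of 4 is only half a texel.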
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);  // 64bpp
    ASSERT_TRUE(image.initialized());
    VkBufferObj buffer;
    VkMemoryPropertyFlags reqs = 0;
    buffer.init_as_src(*m_device, 128 * 128 * 8, reqs);
    VkBufferImageCopy region = {};
    region.bufferRowLength = 128;
    region.bufferImageHeight = 128;
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    // layerCount can't be 0 - Expect MISMATCHED_IMAGE_ASPECT
    region.imageSubresource.layerCount = 1;
    region.imageExtent.height = 4;
    region.imageExtent.width = 4;
    region.imageExtent.depth = 1;

    VkImageObj image2(m_device);
    image2.Init(128, 128, 1, VK_FORMAT_R8G8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);  // 16bpp
    ASSERT_TRUE(image2.initialized());
    VkBufferObj buffer2;
    VkMemoryPropertyFlags reqs2 = 0;
    buffer2.init_as_src(*m_device, 128 * 128 * 2, reqs2);
    VkBufferImageCopy region2 = {};
    region2.bufferRowLength = 128;
    region2.bufferImageHeight = 128;
    region2.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    // layerCount can't be 0 - Expect MISMATCHED_IMAGE_ASPECT
    region2.imageSubresource.layerCount = 1;
    region2.imageExtent.height = 4;
    region2.imageExtent.width = 4;
    region2.imageExtent.depth = 1;

    m_commandBuffer->begin();

    // Image must have offset.z of 0 and extent.depth of 1
    // Introduce failure by setting imageExtent.depth to 0
    region.imageExtent.depth = 0;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    m_errorMonitor->VerifyFound();
    region.imageExtent.depth = 1;

    // Image must have offset.z of 0 and extent.depth of 1
    // Introduce failure by setting imageOffset.z to 4
    // Note: Also (unavoidably) triggers 'region exceeds image' #1228
    region.imageOffset.z = 4;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    m_errorMonitor->VerifyFound();
    region.imageOffset.z = 0;

    // BufferOffset must be a multiple of the calling command's VkImage parameter's texel size
    // Introduce failure by setting bufferOffset to half a texel (4 bytes of an 8-byte texel)
    region.bufferOffset = 4;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // BufferOffset must be a multiple of 4
    // Introduce failure by setting bufferOffset to a value not divisible by 4
    region2.bufferOffset = 6;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00194");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer2.handle(), image2.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                           &region2);
    m_errorMonitor->VerifyFound();

    // BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
    region.bufferOffset = 0;
    region.imageExtent.height = 128;
    region.imageExtent.width = 128;
    // Introduce failure by setting bufferRowLength > 0 but less than width
    region.bufferRowLength = 64;
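    // 64 is nonzero but less than imageExtent.width (128), which is exactly what
    // VUID-VkBufferImageCopy-bufferRowLength-00195 below rejects.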
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00195");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                           &region);
    m_errorMonitor->VerifyFound();

    // BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
    region.bufferRowLength = 128;
    // Introduce failure by setting bufferImageHeight > 0 but less than height
    region.bufferImageHeight = 64;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00196");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                           &region);
    m_errorMonitor->VerifyFound();
    region.bufferImageHeight = 128;

    VkImageObj intImage1(m_device);
    intImage1.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    intImage1.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
    VkImageObj intImage2(m_device);
    intImage2.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    intImage2.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);

    VkImageBlit blitRegion = {};
    blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    blitRegion.srcSubresource.baseArrayLayer = 0;
    blitRegion.srcSubresource.layerCount = 1;
    blitRegion.srcSubresource.mipLevel = 0;
    blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    blitRegion.dstSubresource.baseArrayLayer = 0;
    blitRegion.dstSubresource.layerCount = 1;
    blitRegion.dstSubresource.mipLevel = 0;
    blitRegion.srcOffsets[0] = {128, 0, 0};
    blitRegion.srcOffsets[1] = {128, 128, 1};
    blitRegion.dstOffsets[0] = {0, 128, 0};
    blitRegion.dstOffsets[1] = {128, 128, 1};

    // Look for NULL-blit warning
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                         "vkCmdBlitImage(): pRegions[0].srcOffsets specify a zero-volume area.");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                         "vkCmdBlitImage(): pRegions[0].dstOffsets specify a zero-volume area.");
    vkCmdBlitImage(m_commandBuffer->handle(), intImage1.handle(), intImage1.Layout(), intImage2.handle(), intImage2.Layout(), 1,
                   &blitRegion, VK_FILTER_LINEAR);
    m_errorMonitor->VerifyFound();
}

VkResult GPDIFPHelper(VkPhysicalDevice dev, const VkImageCreateInfo *ci, VkImageFormatProperties *limits = nullptr) {
    VkImageFormatProperties tmp_limits;
    limits = limits ?
limits : &tmp_limits; return vkGetPhysicalDeviceImageFormatProperties(dev, ci->format, ci->imageType, ci->tiling, ci->usage, ci->flags, limits); } TEST_F(VkLayerTest, CreateImageMiscErrors) { TEST_DESCRIPTION("Misc leftover valid usage errors in VkImageCreateInfo struct"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {64, 64, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci)); { VkImageCreateInfo image_ci = safe_image_ci; image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT; image_ci.queueFamilyIndexCount = 2; image_ci.pQueueFamilyIndices = nullptr; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00941"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT; image_ci.queueFamilyIndexCount = 1; const uint32_t queue_family = 0; image_ci.pQueueFamilyIndices = &queue_family; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00942"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.format = VK_FORMAT_UNDEFINED; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00943"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.arrayLayers = 6; image_ci.imageType = VK_IMAGE_TYPE_1D; image_ci.extent = {64, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.imageType = VK_IMAGE_TYPE_3D; image_ci.extent = {4, 4, 4}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.imageType = VK_IMAGE_TYPE_3D; image_ci.extent = {4, 4, 4}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257"); 
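        // For reference (a summary, not consumed by the checks): VkImageCreateInfo::samples must be
        // VK_SAMPLE_COUNT_1_BIT whenever any of the following hold, which is what the four cases in this
        // block violate one at a time with VK_SAMPLE_COUNT_4_BIT:
        //
        //   imageType == VK_IMAGE_TYPE_3D
        //   flags contains VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
        //   tiling == VK_IMAGE_TILING_LINEAR
        //   mipLevels != 1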
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.arrayLayers = 6; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.tiling = VK_IMAGE_TILING_LINEAR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.mipLevels = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02257"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00969"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } // InitialLayout not VK_IMAGE_LAYOUT_UNDEFINED or VK_IMAGE_LAYOUT_PREDEFINED { VkImageCreateInfo image_ci = safe_image_ci; image_ci.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-initialLayout-00993"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, CreateImageMinLimitsViolation) { TEST_DESCRIPTION("Create invalid image with invalid parameters violation minimum limit, such as being zero."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; 
                                                         // any is supported
    tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM;        // has mandatory support for all usages
    tmp_img_ci.extent = {1, 1, 1};                        // limit is 256 for 3D, or 4096
    tmp_img_ci.mipLevels = 1;                             // any is supported
    tmp_img_ci.arrayLayers = 1;                           // limit is 256
    tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT;           // needs to be 1 if TILING_LINEAR
    // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels, layers, and samples all 1
    tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;   // depends on format
    tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    const VkImageCreateInfo safe_image_ci = tmp_img_ci;

    enum Dimension { kWidth = 0x1, kHeight = 0x2, kDepth = 0x4 };

    for (std::underlying_type<Dimension>::type bad_dimensions = 0x1; bad_dimensions < 0x8; ++bad_dimensions) {
        VkExtent3D extent = {1, 1, 1};

        if (bad_dimensions & kWidth) {
            extent.width = 0;
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00944");
        }

        if (bad_dimensions & kHeight) {
            extent.height = 0;
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00945");
        }

        if (bad_dimensions & kDepth) {
            extent.depth = 0;
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00946");
        }

        VkImageCreateInfo bad_image_ci = safe_image_ci;
        bad_image_ci.imageType = VK_IMAGE_TYPE_3D;  // has to be 3D otherwise it might trigger the non-1 error instead
        bad_image_ci.extent = extent;
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    {
        VkImageCreateInfo bad_image_ci = safe_image_ci;
        bad_image_ci.mipLevels = 0;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00947");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    {
        VkImageCreateInfo bad_image_ci = safe_image_ci;
        bad_image_ci.arrayLayers = 0;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-00948");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    {
        VkImageCreateInfo bad_image_ci = safe_image_ci;
        bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        bad_image_ci.arrayLayers = 5;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        bad_image_ci.arrayLayers = 6;
        bad_image_ci.extent = {64, 63, 1};
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    {
        VkImageCreateInfo bad_image_ci = safe_image_ci;
        bad_image_ci.imageType = VK_IMAGE_TYPE_1D;
        bad_image_ci.extent = {64, 2, 1};
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        bad_image_ci.imageType = VK_IMAGE_TYPE_1D;
        bad_image_ci.extent = {64, 1, 2};
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        bad_image_ci.imageType = VK_IMAGE_TYPE_2D;
        bad_image_ci.extent = {64, 64, 2};
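        // For reference (a summary of the rules these cases exercise, not consumed by the checks):
        //
        //   VK_IMAGE_TYPE_1D  -> extent.height == 1 && extent.depth == 1
        //   VK_IMAGE_TYPE_2D  -> extent.depth == 1   (cube-compatible also needs width == height and arrayLayers >= 6)
        //   VK_IMAGE_TYPE_3D  -> arrayLayers == 1
        //
        // hence the {64, 64, 2} extent set just above is rejected for a 2D image.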
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();

        bad_image_ci.imageType = VK_IMAGE_TYPE_2D;
        bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        bad_image_ci.arrayLayers = 6;
        bad_image_ci.extent = {64, 64, 2};
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }

    {
        VkImageCreateInfo bad_image_ci = safe_image_ci;
        bad_image_ci.imageType = VK_IMAGE_TYPE_3D;
        bad_image_ci.arrayLayers = 2;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00961");
        vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
        m_errorMonitor->VerifyFound();
    }
}

VkFormat FindFormatLinearWithoutMips(VkPhysicalDevice gpu, VkImageCreateInfo image_ci) {
    image_ci.tiling = VK_IMAGE_TILING_LINEAR;

    const VkFormat first_vk_format = static_cast<VkFormat>(1);
    const VkFormat last_vk_format = static_cast<VkFormat>(130);  // avoid compressed/feature protected, otherwise 184

    for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
        image_ci.format = format;

        // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
        VkFormatProperties format_props;
        vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
        const VkFormatFeatureFlags core_filter = 0x1FFF;
        const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter
                                                                          : format_props.optimalTilingFeatures & core_filter;
        if (!(features & core_filter)) continue;

        VkImageFormatProperties img_limits;
        if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && img_limits.maxMipLevels == 1) return format;
    }

    return VK_FORMAT_UNDEFINED;
}

bool FindFormatWithoutSamples(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) {
    const VkFormat first_vk_format = static_cast<VkFormat>(1);
    const VkFormat last_vk_format = static_cast<VkFormat>(130);  // avoid compressed/feature protected, otherwise 184

    for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
        image_ci.format = format;

        // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
        VkFormatProperties format_props;
        vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
        const VkFormatFeatureFlags core_filter = 0x1FFF;
        const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ?
format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; if (!(features & core_filter)) continue; for (VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_64_BIT; samples > 0; samples = static_cast(samples >> 1)) { image_ci.samples = samples; VkImageFormatProperties img_limits; if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && !(img_limits.sampleCounts & samples)) return true; } } return false; } TEST_F(VkLayerTest, CreateImageMaxLimitsViolation) { TEST_DESCRIPTION("Create invalid image with invalid parameters exceeding physical device limits."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci)); const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits; { VkImageCreateInfo image_ci = safe_image_ci; image_ci.extent = {8, 8, 1}; image_ci.mipLevels = 4 + 1; // 4 = log2(8) + 1 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci.extent = {8, 15, 1}; image_ci.mipLevels = 4 + 1; // 4 = floor(log2(15)) + 1 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.tiling = VK_IMAGE_TILING_LINEAR; image_ci.extent = {64, 64, 1}; image_ci.format = FindFormatLinearWithoutMips(gpu(), image_ci); image_ci.mipLevels = 2; if (image_ci.format != VK_FORMAT_UNDEFINED) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-02255"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s Cannot find a format to test maxMipLevels limit; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); if (img_limits.maxArrayLayers != UINT32_MAX) { image_ci.arrayLayers = img_limits.maxArrayLayers + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-02256"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s VkImageFormatProperties::maxArrayLayers is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; bool found = FindFormatWithoutSamples(gpu(), 
                                                      image_ci);
        if (found) {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02258");
            vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
            m_errorMonitor->VerifyFound();
        } else {
            printf("%s Could not find a format with some unsupported samples; skipping part of test.\n", kSkipPrefix);
        }
    }

    {
        VkImageCreateInfo image_ci = safe_image_ci;
        image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;  // (any attachment bit)

        VkImageFormatProperties img_limits;
        ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits));

        if (dev_limits.maxFramebufferWidth != UINT32_MAX) {
            image_ci.extent = {dev_limits.maxFramebufferWidth + 1, 64, 1};
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00964");
            vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
            m_errorMonitor->VerifyFound();
        } else {
            printf("%s VkPhysicalDeviceLimits::maxFramebufferWidth is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
        }

        if (dev_limits.maxFramebufferHeight != UINT32_MAX) {
            image_ci.usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;  // try different one too
            image_ci.extent = {64, dev_limits.maxFramebufferHeight + 1, 1};
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00965");
            vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
            m_errorMonitor->VerifyFound();
        } else {
            printf("%s VkPhysicalDeviceLimits::maxFramebufferHeight is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
        }
    }
}

bool FindUnsupportedImage(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) {
    const VkFormat first_vk_format = static_cast<VkFormat>(1);
    const VkFormat last_vk_format = static_cast<VkFormat>(130);  // avoid compressed/feature protected, otherwise 184

    const std::vector<VkImageTiling> tilings = {VK_IMAGE_TILING_LINEAR, VK_IMAGE_TILING_OPTIMAL};
    for (const auto tiling : tilings) {
        image_ci.tiling = tiling;

        for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
            image_ci.format = format;

            VkFormatProperties format_props;
            vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
            const VkFormatFeatureFlags core_filter = 0x1FFF;
            const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ?
format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; if (!(features & core_filter)) continue; // We wand supported by features, but not by ImageFormatProperties // get as many usage flags as possible image_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; if (features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_SAMPLED_BIT; if (features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT; if (features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) image_ci.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; if (features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) image_ci.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; VkImageFormatProperties img_limits; if (VK_ERROR_FORMAT_NOT_SUPPORTED == GPDIFPHelper(gpu, &image_ci, &img_limits)) { return true; } } } return false; } VkFormat FindFormatWithoutFeatures(VkPhysicalDevice gpu, VkImageTiling tiling, VkFormatFeatureFlags undesired_features = UINT32_MAX) { const VkFormat first_vk_format = static_cast(1); const VkFormat last_vk_format = static_cast(130); // avoid compressed/feature protected, otherwise 184 for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast(format + 1)) { VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props); const VkFormatFeatureFlags core_filter = 0x1FFF; const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; const auto valid_features = features & core_filter; if (undesired_features == UINT32_MAX) { if (!valid_features) return format; } else { if (valid_features && !(valid_features & undesired_features)) return format; } } return VK_FORMAT_UNDEFINED; } TEST_F(VkLayerTest, CopyImageTypeExtentMismatch) { // Image copy tests where format type and extents don't match ASSERT_NO_FATAL_FAILURE(Init()); VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {32, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Create 1D image VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); // 2D image ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); // 3D image ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {32, 32, 8}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); // 2D image array ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; ci.arrayLayers = 8; VkImageObj image_2D_array(m_device); image_2D_array.init(&ci); ASSERT_TRUE(image_2D_array.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 1, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer 
= 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Sanity check m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyNotFound(); // 1D texture w/ offset.y > 0. Source = VU 09c00124, dest = 09c00130 copy_region.srcOffset.y = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.y = 0; copy_region.dstOffset.y = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.y = 0; // 1D texture w/ extent.height > 1. Source = VU 09c00124, dest = 09c00130 copy_region.extent.height = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.extent.height = 1; // 1D texture w/ offset.z > 0. Source = VU 09c00df2, dest = 09c00df4 copy_region.srcOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.z = 0; copy_region.dstOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.z = 0; // 1D texture w/ extent.depth > 1. 
Source = VU 09c00df2, dest = 09c00df4 copy_region.extent.depth = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst) m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst) m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.extent.depth = 1; // 2D texture w/ offset.z > 0. Source = VU 09c00df6, dest = 09c00df8 copy_region.extent = {16, 16, 1}; copy_region.srcOffset.z = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01787"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src) m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.z = 0; copy_region.dstOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01788"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst) m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.z = 0; // 3D texture accessing an array layer other than 0. VU 09c0011a copy_region.extent = {4, 4, 1}; copy_region.srcSubresource.baseArrayLayer = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00141"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcSubresource-01698"); // also 'too many layers' m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageTypeExtentMismatchMaintenance1) { // Image copy tests where format type and extents don't match and the Maintenance1 extension is enabled ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s Maintenance1 extension cannot be enabled, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkFormat image_format = VK_FORMAT_R8G8B8A8_UNORM; VkFormatProperties format_props; // TODO: Remove this check if or when devsim handles extensions. // The chosen format has mandatory support the transfer src and dst format features when Maitenance1 is enabled. 
However, our // use of devsim and the mock ICD violate this guarantee. vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_format, &format_props); if (!(format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)) { printf("%s Maintenance1 extension is not supported.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = image_format; ci.extent = {32, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Create 1D image VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); // 2D image ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); // 3D image ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {32, 32, 8}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); // 2D image array ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; ci.arrayLayers = 8; VkImageObj image_2D_array(m_device); image_2D_array.init(&ci); ASSERT_TRUE(image_2D_array.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 1, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Copy from layer not present copy_region.srcSubresource.baseArrayLayer = 4; copy_region.srcSubresource.layerCount = 6; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcSubresource-01698"); m_commandBuffer->CopyImage(image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; // Copy to layer not present copy_region.dstSubresource.baseArrayLayer = 1; copy_region.dstSubresource.layerCount = 8; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstSubresource-01699"); m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.layerCount = 1; m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageCompressedBlockAlignment) { // Image copy tests on compressed images with block alignment errors SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(Init()); // Select a compressed format and verify support VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); VkFormat compressed_format = VK_FORMAT_UNDEFINED; if (device_features.textureCompressionBC) { compressed_format = VK_FORMAT_BC3_SRGB_BLOCK; } else if (device_features.textureCompressionETC2) { compressed_format 
= VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK; } else if (device_features.textureCompressionASTC_LDR) { compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = compressed_format; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageFormatProperties img_prop = {}; if (VK_SUCCESS != vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), ci.format, ci.imageType, ci.tiling, ci.usage, ci.flags, &img_prop)) { printf("%s No compressed formats supported - CopyImageCompressedBlockAlignment skipped.\n", kSkipPrefix); return; } // Create images VkImageObj image_1(m_device); image_1.init(&ci); ASSERT_TRUE(image_1.initialized()); ci.extent = {62, 62, 1}; // slightly smaller and not divisible by block size VkImageObj image_2(m_device); image_2.init(&ci); ASSERT_TRUE(image_2.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {48, 48, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Sanity check m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyNotFound(); std::string vuid; bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) || (DeviceValidationVersion() >= VK_API_VERSION_1_1)); // Src, Dest offsets must be multiples of compressed block sizes {4, 4, 1} // Image transfer granularity gets set to compressed block size, so an ITG error is also (unavoidably) triggered. vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01727" : "VUID-VkImageCopy-srcOffset-00157"; copy_region.srcOffset = {2, 4, 0}; // source width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {12, 1, 0}; // source height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {0, 0, 0}; vuid = ycbcr ? 
"VUID-VkImageCopy-dstImage-01731" : "VUID-VkImageCopy-dstOffset-00162"; copy_region.dstOffset = {1, 0, 0}; // dest width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {4, 1, 0}; // dest height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; // Copy extent must be multiples of compressed block sizes {4, 4, 1} if not full width/height vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01728" : "VUID-VkImageCopy-extent-00158"; copy_region.extent = {62, 60, 1}; // source width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01729" : "VUID-VkImageCopy-extent-00159"; copy_region.extent = {60, 62, 1}; // source height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01732" : "VUID-VkImageCopy-extent-00163"; copy_region.extent = {62, 60, 1}; // dest width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01733" : "VUID-VkImageCopy-extent-00164"; copy_region.extent = {60, 62, 1}; // dest height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); // Note: "VUID-VkImageCopy-extent-00160", "VUID-VkImageCopy-extent-00165", "VUID-VkImageCopy-srcImage-01730", // "VUID-VkImageCopy-dstImage-01734" // There are currently no supported compressed formats with a block depth other than 1, // so impossible to create a 'not a multiple' condition for depth. 
m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageSinglePlane422Alignment) { // Image copy tests on single-plane _422 formats with block alignment errors // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Select a _422 format and verify support VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8B8G8R8_422_UNORM_KHR; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Single-plane _422 image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } // Create images ci.extent = {64, 64, 1}; VkImageObj image_422(m_device); image_422.init(&ci); ASSERT_TRUE(image_422.initialized()); ci.extent = {64, 64, 1}; ci.format = VK_FORMAT_R8G8B8A8_UNORM; VkImageObj image_ucmp(m_device); image_ucmp.init(&ci); ASSERT_TRUE(image_ucmp.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {48, 48, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Src offsets must be multiples of compressed block sizes copy_region.srcOffset = {3, 4, 0}; // source offset x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01727"); m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {0, 0, 0}; // Dst offsets must be multiples of compressed block sizes copy_region.dstOffset = {1, 0, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01731"); m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; // Copy extent must be multiples of compressed block sizes if not full width/height copy_region.extent = {31, 60, 1}; // 422 source, extent.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01728"); m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); // 422 dest, extent.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01732"); m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; m_commandBuffer->end(); } TEST_F(VkLayerTest, MultiplaneImageSamplerConversionMismatch) { TEST_DESCRIPTION("Create sampler with ycbcr conversion and use with an image created without ycrcb conversion"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, 
VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } // Enable Ycbcr Conversion Features VkPhysicalDeviceSamplerYcbcrConversionFeatures ycbcr_features = {}; ycbcr_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES; ycbcr_features.samplerYcbcrConversion = VK_TRUE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &ycbcr_features)); const VkImageCreateInfo ci = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, NULL, 0, VK_IMAGE_TYPE_2D, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, {128, 128, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, VK_IMAGE_LAYOUT_UNDEFINED}; // Verify formats bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT); if (!supported) { printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix); return; } // Create Ycbcr conversion VkSamplerYcbcrConversionCreateInfo ycbcr_create_info = {VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, NULL, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, VK_SAMPLER_YCBCR_RANGE_ITU_FULL, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, VK_CHROMA_LOCATION_COSITED_EVEN, VK_CHROMA_LOCATION_COSITED_EVEN, VK_FILTER_NEAREST, false}; VkSamplerYcbcrConversion conversion; vkCreateSamplerYcbcrConversion(m_device->handle(), &ycbcr_create_info, nullptr, &conversion); VkSamplerYcbcrConversionInfo ycbcr_info = {}; ycbcr_info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO; ycbcr_info.conversion = conversion; // Create a sampler using conversion VkSamplerCreateInfo sci = SafeSaneSamplerCreateInfo(); sci.pNext = &ycbcr_info; VkSampler sampler; VkResult err = vkCreateSampler(m_device->device(), &sci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Create an image without a Ycbcr conversion VkImageObj mpimage(m_device); mpimage.init(&ci); VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = mpimage.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &ivci, nullptr, &view); // Use the image and sampler together in a descriptor set OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, &sampler}, }); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; // Update the descriptor set expecting to get an error VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = 
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-01948"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySamplerYcbcrConversion(m_device->device(), conversion, nullptr); vkDestroyImageView(m_device->device(), view, NULL); vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkLayerTest, CopyImageMultiplaneAspectBits) { // Image copy tests on multiplane images with aspect errors // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Select multi-plane formats and verify support VkFormat mp3_format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR; VkFormat mp2_format = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR; VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = mp2_format; ci.extent = {256, 256, 1}; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); ci.format = mp3_format; supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Multiplane image formats not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } // Create images VkImageObj mp3_image(m_device); mp3_image.init(&ci); ASSERT_TRUE(mp3_image.initialized()); ci.format = mp2_format; VkImageObj mp2_image(m_device); mp2_image.init(&ci); ASSERT_TRUE(mp2_image.initialized()); ci.format = VK_FORMAT_D24_UNORM_S8_UINT; VkImageObj sp_image(m_device); sp_image.init(&ci); ASSERT_TRUE(sp_image.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {128, 128, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->SetUnexpectedError("VUID-vkCmdCopyImage-srcImage-00135"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01552"); m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01553"); m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; m_errorMonitor->SetUnexpectedError("VUID-vkCmdCopyImage-srcImage-00135"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01554"); m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetUnexpectedError("VUID-vkCmdCopyImage-srcImage-00135"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01555"); m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01556"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, ©_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01557"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also m_commandBuffer->CopyImage(sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), 
                               VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, CopyImageSrcSizeExceeded) {
    // Image copy with source region specified greater than src image size
    ASSERT_NO_FATAL_FAILURE(Init());

    // Create images with full mip chain
    VkImageCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_3D;
    ci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ci.extent = {32, 32, 8};
    ci.mipLevels = 6;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.queueFamilyIndexCount = 0;
    ci.pQueueFamilyIndices = NULL;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkImageObj src_image(m_device);
    src_image.init(&ci);
    ASSERT_TRUE(src_image.initialized());

    // Dest image with one more mip level
    ci.extent = {64, 64, 16};
    ci.mipLevels = 7;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    VkImageObj dst_image(m_device);
    dst_image.init(&ci);
    ASSERT_TRUE(dst_image.initialized());

    m_commandBuffer->begin();

    VkImageCopy copy_region;
    copy_region.extent = {32, 32, 8};
    copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.srcSubresource.mipLevel = 0;
    copy_region.dstSubresource.mipLevel = 0;
    copy_region.srcSubresource.baseArrayLayer = 0;
    copy_region.dstSubresource.baseArrayLayer = 0;
    copy_region.srcSubresource.layerCount = 1;
    copy_region.dstSubresource.layerCount = 1;
    copy_region.srcOffset = {0, 0, 0};
    copy_region.dstOffset = {0, 0, 0};

    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyNotFound();

    // Source exceeded in x-dim, VU 01202
    copy_region.srcOffset.x = 4;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImage-pRegions-00122");  // General "contained within" VU
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00144");
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyFound();

    // Source exceeded in y-dim, VU 01203
    copy_region.srcOffset.x = 0;
    copy_region.extent.height = 48;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145");
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyFound();

    // Source exceeded in z-dim, VU 01204
    copy_region.extent = {4, 4, 4};
    copy_region.srcSubresource.mipLevel = 2;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147");
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, CopyImageDstSizeExceeded) {
    // Image copy with dest region specified greater than dest image size
    ASSERT_NO_FATAL_FAILURE(Init());

    // Create images with full mip chain
    VkImageCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
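    // Illustrative note: with the full mip chains used by these 32x32x8 3D images, mip level N has extent
    // {max(32 >> N, 1), max(32 >> N, 1), max(8 >> N, 1)}, so level 2 is 8x8x2. That is why the z-dimension cases
    // above and below use a {4, 4, 4} copy extent at mip level 2: a depth of 4 exceeds the 2-deep mip level.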
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_3D;
    ci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ci.extent = {32, 32, 8};
    ci.mipLevels = 6;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.queueFamilyIndexCount = 0;
    ci.pQueueFamilyIndices = NULL;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkImageObj dst_image(m_device);
    dst_image.init(&ci);
    ASSERT_TRUE(dst_image.initialized());

    // Src image with one more mip level
    ci.extent = {64, 64, 16};
    ci.mipLevels = 7;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    VkImageObj src_image(m_device);
    src_image.init(&ci);
    ASSERT_TRUE(src_image.initialized());

    m_commandBuffer->begin();

    VkImageCopy copy_region;
    copy_region.extent = {32, 32, 8};
    copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.srcSubresource.mipLevel = 0;
    copy_region.dstSubresource.mipLevel = 0;
    copy_region.srcSubresource.baseArrayLayer = 0;
    copy_region.dstSubresource.baseArrayLayer = 0;
    copy_region.srcSubresource.layerCount = 1;
    copy_region.dstSubresource.layerCount = 1;
    copy_region.srcOffset = {0, 0, 0};
    copy_region.dstOffset = {0, 0, 0};

    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyNotFound();

    // Dest exceeded in x-dim, VU 01205
    copy_region.dstOffset.x = 4;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImage-pRegions-00123");  // General "contained within" VU
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00150");
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyFound();

    // Dest exceeded in y-dim, VU 01206
    copy_region.dstOffset.x = 0;
    copy_region.extent.height = 48;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151");
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyFound();

    // Dest exceeded in z-dim, VU 01207
    copy_region.extent = {4, 4, 4};
    copy_region.dstSubresource.mipLevel = 2;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153");
    m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copy_region);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, CopyImageFormatSizeMismatch) {
    VkResult err;
    bool pass;

    // Create color images with different format sizes and try to copy between them
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00135");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Create two images with different-sized formats and try to copy between them
    VkImage srcImage;
    VkImage dstImage;
    VkDeviceMemory srcMem;
    VkDeviceMemory destMem;
    VkMemoryRequirements memReqs;

    VkImageCreateInfo image_create_info =
        {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.flags = 0;

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage);
    ASSERT_VK_SUCCESS(err);

    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // Introduce failure by creating second image with a different-sized format.
    image_create_info.format = VK_FORMAT_R5G5B5A1_UNORM_PACK16;
    VkFormatProperties properties;
    vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_create_info.format, &properties);
    if (properties.optimalTilingFeatures == 0) {
        vkDestroyImage(m_device->device(), srcImage, NULL);
        printf("%s Image format not supported; skipped.\n", kSkipPrefix);
        return;
    }

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage);
    ASSERT_VK_SUCCESS(err);

    // Allocate memory
    VkMemoryAllocateInfo memAlloc = {};
    memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memAlloc.pNext = NULL;
    memAlloc.allocationSize = 0;
    memAlloc.memoryTypeIndex = 0;

    vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs);
    memAlloc.allocationSize = memReqs.size;
    pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
    ASSERT_TRUE(pass);
    err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem);
    ASSERT_VK_SUCCESS(err);

    vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs);
    memAlloc.allocationSize = memReqs.size;
    pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
    ASSERT_TRUE(pass);
    err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem);
    ASSERT_VK_SUCCESS(err);

    err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0);
    ASSERT_VK_SUCCESS(err);
    err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0);
    ASSERT_VK_SUCCESS(err);

    m_commandBuffer->begin();
    VkImageCopy copyRegion;
    copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.srcSubresource.mipLevel = 0;
    copyRegion.srcSubresource.baseArrayLayer = 0;
    copyRegion.srcSubresource.layerCount = 1;
    copyRegion.srcOffset.x = 0;
    copyRegion.srcOffset.y = 0;
    copyRegion.srcOffset.z = 0;
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.dstSubresource.mipLevel = 0;
    copyRegion.dstSubresource.baseArrayLayer = 0;
    copyRegion.dstSubresource.layerCount = 1;
    copyRegion.dstOffset.x = 0;
    copyRegion.dstOffset.y = 0;
    copyRegion.dstOffset.z = 0;
    copyRegion.extent.width = 1;
    copyRegion.extent.height = 1;
    copyRegion.extent.depth = 1;
    m_commandBuffer->CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
    m_commandBuffer->end();

    m_errorMonitor->VerifyFound();

    vkDestroyImage(m_device->device(), dstImage, NULL);
    vkFreeMemory(m_device->device(), destMem, NULL);

    // Copy to multiplane image with mismatched sizes
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00135");

    VkImageCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
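    // Illustrative note: copies involving multi-planar images address one plane at a time through
    // VK_IMAGE_ASPECT_PLANE_n_BIT, and each plane is sized like its compatible single-plane format
    // (plane 0 of VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM is VK_FORMAT_R8_UNORM-sized). Copying the
    // 4-byte-per-texel B8G8R8A8 srcImage into that 1-byte plane is the size mismatch this test expects
    // to be reported as VUID-vkCmdCopyImage-srcImage-00135.
#if 0
    // Hedged sketch (not exercised by this test): a size-compatible per-plane copy would pair a
    // VK_FORMAT_R8_UNORM source with plane 0 of the multi-planar destination, e.g.:
    VkImageCopy plane0_copy = copyRegion;
    plane0_copy.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;    // single-plane R8 source
    plane0_copy.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT;  // R8-compatible plane 0
    plane0_copy.extent = {16, 16, 1};
#endif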
    ci.extent = {32, 32, 1};
    ci.mipLevels = 1;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_LINEAR;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.queueFamilyIndexCount = 0;
    ci.pQueueFamilyIndices = NULL;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
    bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
    bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) ||
                  (DeviceValidationVersion() >= VK_API_VERSION_1_1));
    if (!supported || !ycbcr) {
        printf("%s Image format not supported; skipped multiplanar copy test.\n", kSkipPrefix);
        vkDestroyImage(m_device->device(), srcImage, NULL);
        vkFreeMemory(m_device->device(), srcMem, NULL);
        return;
    }

    VkImageObj mpImage(m_device);
    mpImage.init(&ci);
    ASSERT_TRUE(mpImage.initialized());
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT;
    vkResetCommandBuffer(m_commandBuffer->handle(), 0);
    m_commandBuffer->begin();
    m_commandBuffer->CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, mpImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
    m_commandBuffer->end();

    m_errorMonitor->VerifyFound();

    vkDestroyImage(m_device->device(), srcImage, NULL);
    vkFreeMemory(m_device->device(), srcMem, NULL);
}

TEST_F(VkLayerTest, CopyImageDepthStencilFormatMismatch) {
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s Couldn't find depth stencil image format.\n", kSkipPrefix);
        return;
    }

    VkFormatProperties properties;
    vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties);
    if (properties.optimalTilingFeatures == 0) {
        printf("%s Image format not supported; skipped.\n", kSkipPrefix);
        return;
    }

    VkImageObj srcImage(m_device);
    srcImage.Init(32, 32, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(srcImage.initialized());
    VkImageObj dstImage(m_device);
    dstImage.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(dstImage.initialized());

    // Create two images with different depth/stencil formats and try to copy between them
    m_commandBuffer->begin();
    VkImageCopy copyRegion;
    copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    copyRegion.srcSubresource.mipLevel = 0;
    copyRegion.srcSubresource.baseArrayLayer = 0;
    copyRegion.srcSubresource.layerCount = 1;
    copyRegion.srcOffset.x = 0;
    copyRegion.srcOffset.y = 0;
    copyRegion.srcOffset.z = 0;
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    copyRegion.dstSubresource.mipLevel = 0;
    copyRegion.dstSubresource.baseArrayLayer = 0;
    copyRegion.dstSubresource.layerCount = 1;
    copyRegion.dstOffset.x = 0;
    copyRegion.dstOffset.y = 0;
    copyRegion.dstOffset.z = 0;
    copyRegion.extent.width = 1;
    copyRegion.extent.height = 1;
    copyRegion.extent.depth = 1;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "vkCmdCopyImage called with unmatched source and dest image depth");
    m_commandBuffer->CopyImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copyRegion);
    m_commandBuffer->end();

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CopyImageSampleCountMismatch) {
    TEST_DESCRIPTION("Image copies with sample count mismatches");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkImageFormatProperties image_format_properties;
    vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM,
                                             VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                                             VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0,
                                             &image_format_properties);

    if ((0 == (VK_SAMPLE_COUNT_2_BIT & image_format_properties.sampleCounts)) ||
        (0 == (VK_SAMPLE_COUNT_4_BIT & image_format_properties.sampleCounts))) {
        printf("%s Image multi-sample support not found; skipped.\n", kSkipPrefix);
        return;
    }

    VkImageCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ci.extent = {128, 128, 1};
    ci.mipLevels = 1;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.queueFamilyIndexCount = 0;
    ci.pQueueFamilyIndices = NULL;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkImageObj image1(m_device);
    image1.init(&ci);
    ASSERT_TRUE(image1.initialized());

    ci.samples = VK_SAMPLE_COUNT_2_BIT;
    VkImageObj image2(m_device);
    image2.init(&ci);
    ASSERT_TRUE(image2.initialized());

    ci.samples = VK_SAMPLE_COUNT_4_BIT;
    VkImageObj image4(m_device);
    image4.init(&ci);
    ASSERT_TRUE(image4.initialized());

    m_commandBuffer->begin();

    VkImageCopy copyRegion;
    copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.srcSubresource.mipLevel = 0;
    copyRegion.srcSubresource.baseArrayLayer = 0;
    copyRegion.srcSubresource.layerCount = 1;
    copyRegion.srcOffset = {0, 0, 0};
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.dstSubresource.mipLevel = 0;
    copyRegion.dstSubresource.baseArrayLayer = 0;
    copyRegion.dstSubresource.layerCount = 1;
    copyRegion.dstOffset = {0, 0, 0};
    copyRegion.extent = {128, 128, 1};

    // Copy a single sample image to/from a multi-sample image
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
    vkCmdCopyImage(m_commandBuffer->handle(), image1.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL,
                   1, &copyRegion);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
    vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image1.handle(), VK_IMAGE_LAYOUT_GENERAL,
                   1, &copyRegion);
    m_errorMonitor->VerifyFound();

    // Copy between multi-sample images with different sample counts
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
    vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL,
                   1, &copyRegion);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
    vkCmdCopyImage(m_commandBuffer->handle(), image4.handle(), VK_IMAGE_LAYOUT_GENERAL, image2.handle(), VK_IMAGE_LAYOUT_GENERAL,
                   1, &copyRegion);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, CopyImageAspectMismatch) {
    TEST_DESCRIPTION("Image copies with aspect mask errors");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(Init());
    auto ds_format = FindSupportedDepthStencilFormat(gpu());
    if (!ds_format) {
        printf("%s Couldn't find depth stencil format.\n", kSkipPrefix);
        return;
    }

    VkFormatProperties properties;
    vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties);
    if (properties.optimalTilingFeatures == 0) {
        printf("%s Image 
format VK_FORMAT_D32_SFLOAT not supported; skipped.\n", kSkipPrefix); return; } VkImageObj color_image(m_device), ds_image(m_device), depth_image(m_device); color_image.Init(128, 128, 1, VK_FORMAT_R32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); depth_image.Init(128, 128, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ds_image.Init(128, 128, 1, ds_format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(color_image.initialized()); ASSERT_TRUE(depth_image.initialized()); ASSERT_TRUE(ds_image.initialized()); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {64, 0, 0}; copyRegion.extent = {64, 128, 1}; // Submitting command before command buffer is in recording state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You must call vkBeginCommandBuffer"); // "VUID-vkCmdCopyImage-commandBuffer-recording"); vkCmdCopyImage(m_commandBuffer->handle(), depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); m_commandBuffer->begin(); // Src and dest aspect masks don't match copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) || (DeviceValidationVersion() >= VK_API_VERSION_1_1)); std::string vuid = (ycbcr ? 
"VUID-VkImageCopy-srcImage-01551" : "VUID-VkImageCopy-aspectMask-00137"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Illegal combinations of aspect bits copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); // same test for dstSubresource copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); // Metadata aspect is illegal copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); // same test for dstSubresource copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Aspect mask doesn't match source image format m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142"); // Again redundant but unavoidable m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "unmatched source and 
dest image depth/stencil formats"); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); // Aspect mask doesn't match dest image format copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143"); // Again redundant but unavoidable m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "unmatched source and dest image depth/stencil formats"); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ResolveImageLowSampleCount) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdResolveImage called with source sample count less than 2."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of sample count 1 and try to Resolve between them VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkImageObj srcImage(m_device); srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); VkImageObj dstImage(m_device); dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ResolveImageHighSampleCount) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdResolveImage called with dest sample count greater than 1."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of sample count 4 and try to Resolve between them VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; 
image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; VkImageObj srcImage(m_device); srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); VkImageObj dstImage(m_device); dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ResolveImageFormatMismatch) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdResolveImage called with unmatched source and dest formats."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); // Set format to something other than source image image_create_info.format = VK_FORMAT_R32_SFLOAT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = 
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), srcImage, NULL); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), srcMem, NULL); vkFreeMemory(m_device->device(), destMem, NULL); } TEST_F(VkLayerTest, ResolveImageTypeMismatch) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdResolveImage called with unmatched source and dest image types."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); image_create_info.imageType = VK_IMAGE_TYPE_1D; // Note: Some implementations expect color attachment usage for 
any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), srcImage, NULL); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), srcMem, NULL); vkFreeMemory(m_device->device(), destMem, NULL); } TEST_F(VkLayerTest, ResolveImageLayoutMismatch) { ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImageObj srcImage(m_device); VkImageObj dstImage(m_device); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.flags = 0; srcImage.init(&image_create_info); 
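    // Illustrative note: the negative cases below transition the images to the *_OPTIMAL transfer layouts and then hand
    // VK_IMAGE_LAYOUT_GENERAL to vkCmdResolveImage, so the layout arguments no longer match the tracked layouts
    // (VUID-vkCmdResolveImage-srcImageLayout-00260 and VUID-vkCmdResolveImage-dstImageLayout-00262).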
ASSERT_TRUE(srcImage.initialized()); // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); // source image must have valid contents before resolve VkClearColorValue clear_color = {{0, 0, 0, 0}}; VkImageSubresourceRange subresource = {}; subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresource.layerCount = 1; subresource.levelCount = 1; srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource); srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; // source image layout mismatch m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcImageLayout-00260"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_GENERAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); // dst image layout mismatch m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstImageLayout-00262"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ResolveInvalidSubresource) { ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImageObj srcImage(m_device); VkImageObj dstImage(m_device); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.flags = 0; srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.samples = 
VK_SAMPLE_COUNT_1_BIT; dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); // source image must have valid contents before resolve VkClearColorValue clear_color = {{0, 0, 0, 0}}; VkImageSubresourceRange subresource = {}; subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresource.layerCount = 1; subresource.levelCount = 1; srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource); srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; // invalid source mip level resolveRegion.srcSubresource.mipLevel = image_create_info.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01709"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.srcSubresource.mipLevel = 0; // invalid dest mip level resolveRegion.dstSubresource.mipLevel = image_create_info.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01710"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.dstSubresource.mipLevel = 0; // invalid source array layer range resolveRegion.srcSubresource.baseArrayLayer = image_create_info.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01711"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.srcSubresource.baseArrayLayer = 0; // invalid dest array layer range resolveRegion.dstSubresource.baseArrayLayer = image_create_info.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01712"); m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion); m_errorMonitor->VerifyFound(); resolveRegion.dstSubresource.baseArrayLayer = 0; m_commandBuffer->end(); } TEST_F(VkLayerTest, DepthStencilImageViewWithColorAspectBitError) { // Create a single Image descriptor and cause it to first hit an error due // to using a DS 
format, then cause it to hit error due to COLOR_BIT not // set in aspect // The image format check comes 2nd in validation so we trigger it first, // then when we cause aspect fail next, bad format check will be preempted VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Combination depth/stencil image formats can have only the "); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s Couldn't find depth stencil format.\n", kSkipPrefix); return; } VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); VkImage image_bad; VkImage image_good; // One bad format and one good format for Color attachment const VkFormat tex_format_bad = depth_format; const VkFormat tex_format_good = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format_bad; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_bad); ASSERT_VK_SUCCESS(err); image_create_info.format = tex_format_good; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_good); ASSERT_VK_SUCCESS(err); // ---Bind image memory--- VkMemoryRequirements img_mem_reqs; vkGetImageMemoryRequirements(m_device->device(), image_bad, &img_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.pNext = NULL; image_alloc_info.memoryTypeIndex = 0; image_alloc_info.allocationSize = img_mem_reqs.size; bool pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &image_alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); ASSERT_TRUE(pass); VkDeviceMemory mem; err = 
vkAllocateMemory(m_device->device(), &image_alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image_bad, mem, 0); ASSERT_VK_SUCCESS(err); // ----------------------- VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image_bad; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format_bad; image_view_create_info.subresourceRange.baseArrayLayer = 0; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image_bad, NULL); vkDestroyImage(m_device->device(), image_good, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, ClearImageErrors) { TEST_DESCRIPTION("Call ClearColorImage w/ a depth|stencil image and ClearDepthStencilImage with a color image."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Color image VkClearColorValue clear_color; memset(clear_color.uint32, 0, sizeof(uint32_t) * 4); const VkFormat color_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t img_width = 32; const int32_t img_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = color_format; image_create_info.extent.width = img_width; image_create_info.extent.height = img_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; vk_testing::Image color_image_no_transfer; color_image_no_transfer.init(*m_device, image_create_info); image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image color_image; color_image.init(*m_device, image_create_info); const VkImageSubresourceRange color_range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT); // Depth/Stencil image VkClearDepthStencilValue clear_value = {0}; VkImageCreateInfo ds_image_create_info = vk_testing::Image::create_info(); ds_image_create_info.imageType = VK_IMAGE_TYPE_2D; ds_image_create_info.format = VK_FORMAT_D16_UNORM; ds_image_create_info.extent.width = 64; ds_image_create_info.extent.height = 64; ds_image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; ds_image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image ds_image; ds_image.init(*m_device, ds_image_create_info); const VkImageSubresourceRange ds_range = vk_testing::Image::subresource_range(ds_image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearColorImage called with depth/stencil image."); vkCmdClearColorImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &color_range); 
m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT"); vkCmdClearColorImage(m_commandBuffer->handle(), color_image_no_transfer.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &color_range); m_errorMonitor->VerifyFound(); // Call CmdClearDepthStencilImage with color image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearDepthStencilImage called without a depth/stencil image."); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_value, 1, &ds_range); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CommandQueueFlags) { TEST_DESCRIPTION( "Allocate a command buffer on a queue that does not support graphics and try to issue a graphics-only command"); ASSERT_NO_FATAL_FAILURE(Init()); uint32_t queueFamilyIndex = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT); if (queueFamilyIndex == UINT32_MAX) { printf("%s Non-graphics queue family not found; skipped.\n", kSkipPrefix); return; } else { // Create command pool on a non-graphics queue VkCommandPoolObj command_pool(m_device, queueFamilyIndex); // Setup command buffer on pool VkCommandBufferObj command_buffer(m_device, &command_pool); command_buffer.begin(); // Issue a graphics only command m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool"); VkViewport viewport = {0, 0, 16, 16, 0, 1}; command_buffer.SetViewport(0, 1, &viewport); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ExecuteUnrecordedSecondaryCB) { TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a CB in the initial state"); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); // never record secondary m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089"); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ExecuteUnrecordedPrimaryCB) { TEST_DESCRIPTION("Attempt vkQueueSubmit with a CB in the initial state"); ASSERT_NO_FATAL_FAILURE(Init()); // never record m_commandBuffer VkSubmitInfo si = {}; si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; si.commandBufferCount = 1; si.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00072"); vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ExecuteSecondaryCBWithLayoutMismatch) { TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a CB with incorrect initial layout."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = 
VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkImageSubresource image_sub = VkImageObj::subresource(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0); VkImageSubresourceRange image_sub_range = VkImageObj::subresource_range(image_sub); VkImageObj image(m_device); image.init(&image_create_info); ASSERT_TRUE(image.initialized()); VkImageMemoryBarrier image_barrier = image.image_memory_barrier(0, 0, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, image_sub_range); auto pipeline = [&image_barrier](const VkCommandBufferObj &cb, VkImageLayout old_layout, VkImageLayout new_layout) { image_barrier.oldLayout = old_layout; image_barrier.newLayout = new_layout; vkCmdPipelineBarrier(cb.handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_barrier); }; // Validate that mismatched use of image layout in secondary command buffer is caught at record time VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); pipeline(secondary, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); secondary.end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"); m_commandBuffer->begin(); pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); // Validate that we've tracked the changes from the secondary CB correctly m_errorMonitor->ExpectSuccess(); pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); m_commandBuffer->reset(); secondary.reset(); // Validate that UNDEFINED doesn't false positive on us secondary.begin(); pipeline(secondary, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); secondary.end(); m_commandBuffer->begin(); pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_errorMonitor->ExpectSuccess(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ExtensionNotEnabled) { TEST_DESCRIPTION("Validate that using an API from an unenabled extension returns an error"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Required extensions except VK_KHR_GET_MEMORY_REQUIREMENTS_2 -- to create the needed error std::vector required_device_extensions = {VK_KHR_MAINTENANCE1_EXTENSION_NAME, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME}; for (auto dev_ext : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, dev_ext)) { m_device_extension_names.push_back(dev_ext); } else { printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix, dev_ext); break; } } // Need to ignore this error to get to the one we're testing m_errorMonitor->SetUnexpectedError("VUID-vkCreateDevice-ppEnabledExtensionNames-01387"); 
ASSERT_NO_FATAL_FAILURE(InitState()); // Find address of extension API auto vkCreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)vkGetDeviceProcAddr(m_device->handle(), "vkCreateSamplerYcbcrConversionKHR"); if (vkCreateSamplerYcbcrConversionKHR == nullptr) { printf("%s VK_KHR_sampler_ycbcr_conversion not supported by device; skipped.\n", kSkipPrefix); return; } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-GeneralParameterError-ExtensionNotEnabled"); VkSamplerYcbcrConversionCreateInfo ycbcr_info = {VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, NULL, VK_FORMAT_UNDEFINED, VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, VK_SAMPLER_YCBCR_RANGE_ITU_FULL, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, VK_CHROMA_LOCATION_COSITED_EVEN, VK_CHROMA_LOCATION_COSITED_EVEN, VK_FILTER_NEAREST, false}; VkSamplerYcbcrConversion conversion; vkCreateSamplerYcbcrConversionKHR(m_device->handle(), &ycbcr_info, nullptr, &conversion); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, Maintenance1AndNegativeViewport) { TEST_DESCRIPTION("Attempt to enable AMD_negative_viewport_height and Maintenance1_KHR extension simultaneously"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!((DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) && (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME)))) { printf("%s Maintenance1 and AMD_negative viewport height extensions not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props); const char *extension_names[2] = {"VK_KHR_maintenance1", "VK_AMD_negative_viewport_height"}; VkDevice testDevice; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = queue_info.size(); device_create_info.pQueueCreateInfos = queue_info.data(); device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; device_create_info.enabledExtensionCount = 2; device_create_info.ppEnabledExtensionNames = (const char *const *)extension_names; device_create_info.pEnabledFeatures = &features; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374"); // The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do // not use the LunarG loader (e.g. Android) will not see the message and the test will fail. 
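// VK_KHR_maintenance1 already allows negative viewport heights, so the spec forbids enabling it
// together with VK_AMD_negative_viewport_height; that conflict is what the 00374 VUID below checks.
// For reference, the flipped viewport that both extensions enable looks like this (illustrative
// values -- width/height are whatever framebuffer size an application uses):
//
//     VkViewport flipped = {0.0f, (float)height, (float)width, -(float)height, 0.0f, 1.0f};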
m_errorMonitor->SetUnexpectedError("Failed to create device chain."); vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidCreateDescriptorPool) { TEST_DESCRIPTION("Attempt to create descriptor pool with invalid parameters"); ASSERT_NO_FATAL_FAILURE(Init()); const uint32_t default_descriptor_count = 1; const VkDescriptorPoolSize dp_size_template{VK_DESCRIPTOR_TYPE_SAMPLER, default_descriptor_count}; const VkDescriptorPoolCreateInfo dp_ci_template{VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, // pNext 0, // flags 1, // maxSets 1, // poolSizeCount &dp_size_template}; // try maxSets = 0 { VkDescriptorPoolCreateInfo invalid_dp_ci = dp_ci_template; invalid_dp_ci.maxSets = 0; // invalid maxSets value m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolCreateInfo-maxSets-00301"); { VkDescriptorPool pool; vkCreateDescriptorPool(m_device->device(), &invalid_dp_ci, nullptr, &pool); } m_errorMonitor->VerifyFound(); } // try descriptorCount = 0 { VkDescriptorPoolSize invalid_dp_size = dp_size_template; invalid_dp_size.descriptorCount = 0; // invalid descriptorCount value VkDescriptorPoolCreateInfo dp_ci = dp_ci_template; dp_ci.pPoolSizes = &invalid_dp_size; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-descriptorCount-00302"); { VkDescriptorPool pool; vkCreateDescriptorPool(m_device->device(), &dp_ci, nullptr, &pool); } m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, InvalidCreateBufferSize) { TEST_DESCRIPTION("Attempt to create VkBuffer with size of zero"); ASSERT_NO_FATAL_FAILURE(Init()); VkBufferCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-size-00912"); info.size = 0; VkBuffer buffer; vkCreateBuffer(m_device->device(), &info, nullptr, &buffer); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SetDynViewportParamTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetViewport without multiViewport feature"); SetTargetApiVersion(VK_API_VERSION_1_1); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0}; const VkViewport viewports[] = {vp, vp}; m_commandBuffer->begin(); // array tests m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224"); vkCmdSetViewport(m_commandBuffer->handle(), 1, 1, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 2, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); vkCmdSetViewport(m_commandBuffer->handle(), 1, 0, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224"); 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225"); vkCmdSetViewport(m_commandBuffer->handle(), 1, 2, viewports); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, nullptr); m_errorMonitor->VerifyFound(); // core viewport tests using std::vector; struct TestCase { VkViewport vp; std::string veid; }; // not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit const auto one_past_max_w = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[0])); const auto one_past_max_h = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[1])); const auto min_bound = m_device->props.limits.viewportBoundsRange[0]; const auto max_bound = m_device->props.limits.viewportBoundsRange[1]; const auto one_before_min_bounds = NearestSmaller(min_bound); const auto one_past_max_bounds = NearestGreater(max_bound); const auto below_zero = NearestSmaller(0.0f); const auto past_one = NearestGreater(1.0f); vector<TestCase> test_cases = { {{0.0, 0.0, 0.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"}, {{0.0, 0.0, one_past_max_w, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01771"}, {{0.0, 0.0, NAN, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"}, {{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, "VUID-VkViewport-height-01773"}, {{one_before_min_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"}, {{one_past_max_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"}, {{NAN, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"}, {{0.0, one_before_min_bounds, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"}, {{0.0, NAN, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"}, {{max_bound, 0.0, 1.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"}, {{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, "VUID-VkViewport-y-01233"}, {{0.0, 0.0, 64.0, 64.0, below_zero, 1.0}, "VUID-VkViewport-minDepth-01234"}, {{0.0, 0.0, 64.0, 64.0, past_one, 1.0}, "VUID-VkViewport-minDepth-01234"}, {{0.0, 0.0, 64.0, 64.0, NAN, 1.0}, "VUID-VkViewport-minDepth-01234"}, {{0.0, 0.0, 64.0, 64.0, 0.0, below_zero}, "VUID-VkViewport-maxDepth-01235"}, {{0.0, 0.0, 64.0, 64.0, 0.0, past_one}, "VUID-VkViewport-maxDepth-01235"}, {{0.0, 0.0, 64.0, 64.0, 0.0, NAN}, "VUID-VkViewport-maxDepth-01235"}, }; if (DeviceValidationVersion() < VK_API_VERSION_1_1) { test_cases.push_back({{0.0, 0.0, 64.0, 0.0, 0.0, 1.0}, "VUID-VkViewport-height-01772"}); test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01772"}); } else { test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01773"}); } for (const auto &test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.veid); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp); m_errorMonitor->VerifyFound(); } } void NegHeightViewportTests(VkDeviceObj *m_device, VkCommandBufferObj *m_commandBuffer, ErrorMonitor *m_errorMonitor) { const auto &limits = m_device->props.limits; m_commandBuffer->begin(); using std::vector; struct TestCase { VkViewport vp; vector<std::string> vuids; }; // not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit const auto one_before_min_h = NearestSmaller(-static_cast<float>(limits.maxViewportDimensions[1])); const auto one_past_max_h = NearestGreater(static_cast<float>(limits.maxViewportDimensions[1])); const auto min_bound =
limits.viewportBoundsRange[0]; const auto max_bound = limits.viewportBoundsRange[1]; const auto one_before_min_bound = NearestSmaller(min_bound); const auto one_past_max_bound = NearestGreater(max_bound); const vector<TestCase> test_cases = {{{0.0, 0.0, 64.0, one_before_min_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}}, {{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}}, {{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}}, {{0.0, one_before_min_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01775"}}, {{0.0, one_past_max_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01776"}}, {{0.0, min_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01777"}}, {{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01233"}}}; for (const auto &test_case : test_cases) { for (const auto vuid : test_case.vuids) { if (vuid == "VUID-Undefined") m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is less than VkPhysicalDeviceLimits::viewportBoundsRange[0]"); else m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); } vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, SetDynViewportParamMaintenance1Tests) { TEST_DESCRIPTION("Verify errors are detected on misuse of SetViewport with a negative viewport extension enabled."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s VK_KHR_maintenance1 extension not supported -- skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); NegHeightViewportTests(m_device, m_commandBuffer, m_errorMonitor); } TEST_F(VkLayerTest, SetDynViewportParamMultiviewportTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetViewport with multiViewport feature enabled"); ASSERT_NO_FATAL_FAILURE(Init()); if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } const auto max_viewports = m_device->props.limits.maxViewports; const uint32_t too_many_viewports = 65536 + 1; // let's say this is too much to allocate pViewports for m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter"); vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports, nullptr); m_errorMonitor->VerifyFound(); if (max_viewports >= too_many_viewports) { printf( "%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping " "part of " "test.\n", kSkipPrefix); return; } const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0}; const std::vector<VkViewport> viewports(max_viewports + 1, vp); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports + 1, viewports.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); vkCmdSetViewport(m_commandBuffer->handle(), max_viewports, 1, viewports.data()); m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); vkCmdSetViewport(m_commandBuffer->handle(), 1, max_viewports, viewports.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223"); vkCmdSetViewport(m_commandBuffer->handle(), max_viewports + 1, 0, viewports.data()); m_errorMonitor->VerifyFound(); } // // POSITIVE VALIDATION TESTS // // These tests do not expect to encounter ANY validation errors pass only if this is true TEST_F(VkPositiveLayerTest, PointSizeWriteInFunction) { TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize in vertex shader function."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize and write to it in a function call. static const char PointSizeWriteVertShaderFcn[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void OutPointSize() {\n" " gl_PointSize = 7.0;\n" "}\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " OutPointSize();\n" "}\n"; VkShaderObj vs(m_device, PointSizeWriteVertShaderFcn, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); { VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); } m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, PointSizeGeomShaderSuccess) { TEST_DESCRIPTION( "Create a pipeline using TOPOLOGY_POINT_LIST, set PointSize vertex shader, and write in the final geometry stage."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); if ((!m_device->phy().features().geometryShader) || (!m_device->phy().features().shaderTessellationAndGeometryPointSize)) { printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); // Create VS declaring PointSize and writing to it static const char PointSizeVertShader[] = "#version 450\n" "vec2 vertices[3];\n" "out gl_PerVertex\n" "{\n" " vec4 gl_Position;\n" " float gl_PointSize;\n" "};\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " gl_PointSize = 5.0;\n" "}\n"; static char const 
*gsSource = "#version 450\n" "layout (points) in;\n" "layout (points) out;\n" "layout (max_vertices = 1) out;\n" "void main() {\n" " gl_Position = vec4(1.0, 0.5, 0.5, 0.0);\n" " gl_PointSize = 3.3;\n" " EmitVertex();\n" "}\n"; VkShaderObj vs(m_device, PointSizeVertShader, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&gs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, LoosePointSizeWrite) { TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize outside of a structure."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(InitViewport()); const std::string LoosePointSizeWrite = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Vertex %main "main" %glposition %glpointsize %gl_VertexIndex OpSource GLSL 450 OpName %main "main" OpName %vertices "vertices" OpName %glposition "glposition" OpName %glpointsize "glpointsize" OpName %gl_VertexIndex "gl_VertexIndex" OpDecorate %glposition BuiltIn Position OpDecorate %glpointsize BuiltIn PointSize OpDecorate %gl_VertexIndex BuiltIn VertexIndex %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v2float = OpTypeVector %float 2 %uint = OpTypeInt 32 0 %uint_3 = OpConstant %uint 3 %_arr_v2float_uint_3 = OpTypeArray %v2float %uint_3 %_ptr_Private__arr_v2float_uint_3 = OpTypePointer Private %_arr_v2float_uint_3 %vertices = OpVariable %_ptr_Private__arr_v2float_uint_3 Private %int = OpTypeInt 32 1 %int_0 = OpConstant %int 0 %float_n1 = OpConstant %float -1 %16 = OpConstantComposite %v2float %float_n1 %float_n1 %_ptr_Private_v2float = OpTypePointer Private %v2float %int_1 = OpConstant %int 1 %float_1 = OpConstant %float 1 %21 = OpConstantComposite %v2float %float_1 %float_n1 %int_2 = OpConstant %int 2 %float_0 = OpConstant %float 0 %25 = OpConstantComposite %v2float %float_0 %float_1 %v4float = OpTypeVector %float 4 %_ptr_Output_gl_Position = OpTypePointer Output %v4float %glposition = OpVariable %_ptr_Output_gl_Position Output %_ptr_Output_gl_PointSize = OpTypePointer Output %float %glpointsize = OpVariable %_ptr_Output_gl_PointSize Output %_ptr_Input_int = OpTypePointer Input %int %gl_VertexIndex = OpVariable %_ptr_Input_int Input %int_3 = OpConstant %int 3 %_ptr_Output_v4float = OpTypePointer Output %v4float %_ptr_Output_float = OpTypePointer Output %float %main = OpFunction %void None %3 %5 = OpLabel %18 = OpAccessChain %_ptr_Private_v2float %vertices %int_0 OpStore %18 %16 %22 = OpAccessChain 
%_ptr_Private_v2float %vertices %int_1 OpStore %22 %21 %26 = OpAccessChain %_ptr_Private_v2float %vertices %int_2 OpStore %26 %25 %33 = OpLoad %int %gl_VertexIndex %35 = OpSMod %int %33 %int_3 %36 = OpAccessChain %_ptr_Private_v2float %vertices %35 %37 = OpLoad %v2float %36 %38 = OpCompositeExtract %float %37 0 %39 = OpCompositeExtract %float %37 1 %40 = OpCompositeConstruct %v4float %38 %39 %float_0 %float_1 %42 = OpAccessChain %_ptr_Output_v4float %glposition OpStore %42 %40 OpStore %glpointsize %float_1 OpReturn OpFunctionEnd )"; // Create VS declaring PointSize and write to it in a function call. VkShaderObj vs(m_device, LoosePointSizeWrite, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); { VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); // Set Input Assembly to TOPOLOGY POINT LIST VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; pipelineobj.SetInputAssembly(&ia_state); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); m_commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); } m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, UncompressedToCompressedImageCopy) { TEST_DESCRIPTION("Image copies between compressed and uncompressed images"); ASSERT_NO_FATAL_FAILURE(Init()); // Verify format support // Size-compatible (64-bit) formats. Uncompressed is 64 bits per texel, compressed is 64 bits per 4x4 block (or 4bpt). if (!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR) || !ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR)) { printf("%s Required formats/features not supported - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix); return; } VkImageObj uncomp_10x10t_image(m_device); // Size = 10 * 10 * 64 = 6400 VkImageObj comp_10x10b_40x40t_image(m_device); // Size = 40 * 40 * 4 = 6400 uncomp_10x10t_image.Init(10, 10, 1, VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); comp_10x10b_40x40t_image.Init(40, 40, 1, VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); if (!uncomp_10x10t_image.initialized() || !comp_10x10b_40x40t_image.initialized()) { printf("%s Unable to initialize surfaces - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix); return; } // Both copies represent the same number of bytes. 
Bytes per texel: 8 for R16G16B16A16_UINT, 0.5 for BC1 (one 8-byte block covers a 4x4 texel footprint) // Common copy region reused for both directions VkImageCopy copy_region = {}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); // Copy from uncompressed to compressed copy_region.extent = {10, 10, 1}; // Dimensions in (uncompressed) texels vkCmdCopyImage(m_commandBuffer->handle(), uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); // And from compressed to uncompressed copy_region.extent = {40, 40, 1}; // Dimensions in (compressed) texels vkCmdCopyImage(m_commandBuffer->handle(), comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); } TEST_F(VkPositiveLayerTest, DeleteDescriptorSetLayoutsBeforeDescriptorSets) { TEST_DESCRIPTION("Create DSLayouts and DescriptorSets and then delete the DSLayouts before the DescriptorSets."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkResult err; m_errorMonitor->ExpectSuccess(); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool_one; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool_one); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; VkDescriptorSet descriptorSet; { const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool_one; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); } // ds_layout destroyed err = vkFreeDescriptorSets(m_device->device(), ds_pool_one, 1, &descriptorSet); vkDestroyDescriptorPool(m_device->device(), ds_pool_one, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CommandPoolDeleteWithReferences) { TEST_DESCRIPTION("Ensure the validation layers bookkeeping tracks the implicit command buffer frees."); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandPoolCreateInfo cmd_pool_info = {}; cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_info.pNext = NULL; cmd_pool_info.queueFamilyIndex = m_device->graphics_queue_node_index_; cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; cmd_pool_info.flags = 0; VkCommandPool
secondary_cmd_pool; VkResult res = vkCreateCommandPool(m_device->handle(), &cmd_pool_info, NULL, &secondary_cmd_pool); ASSERT_VK_SUCCESS(res); VkCommandBufferAllocateInfo cmdalloc = vk_testing::CommandBuffer::create_info(secondary_cmd_pool); cmdalloc.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; VkCommandBuffer secondary_cmds; res = vkAllocateCommandBuffers(m_device->handle(), &cmdalloc, &secondary_cmds); VkCommandBufferInheritanceInfo cmd_buf_inheritance_info = {}; cmd_buf_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cmd_buf_inheritance_info.pNext = NULL; cmd_buf_inheritance_info.renderPass = VK_NULL_HANDLE; cmd_buf_inheritance_info.subpass = 0; cmd_buf_inheritance_info.framebuffer = VK_NULL_HANDLE; cmd_buf_inheritance_info.occlusionQueryEnable = VK_FALSE; cmd_buf_inheritance_info.queryFlags = 0; cmd_buf_inheritance_info.pipelineStatistics = 0; VkCommandBufferBeginInfo secondary_begin = {}; secondary_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; secondary_begin.pNext = NULL; secondary_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; secondary_begin.pInheritanceInfo = &cmd_buf_inheritance_info; res = vkBeginCommandBuffer(secondary_cmds, &secondary_begin); ASSERT_VK_SUCCESS(res); vkEndCommandBuffer(secondary_cmds); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_cmds); m_commandBuffer->end(); // DestroyCommandPool *implicitly* frees the command buffers allocated from it vkDestroyCommandPool(m_device->handle(), secondary_cmd_pool, NULL); // If bookkeeping has been lax, validating the reset will attempt to touch deleted data res = vkResetCommandPool(m_device->handle(), m_commandPool->handle(), 0); ASSERT_VK_SUCCESS(res); } TEST_F(VkLayerTest, SecondaryCommandBufferClearColorAttachmentsRenderArea) { TEST_DESCRIPTION( "Create a secondary command buffer with CmdClearAttachments call that has a rect outside of renderPass renderArea"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkCommandBufferAllocateInfo command_buffer_allocate_info = {}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = m_commandPool->handle(); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; command_buffer_allocate_info.commandBufferCount = 1; VkCommandBuffer secondary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer)); VkCommandBufferBeginInfo command_buffer_begin_info = {}; VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {}; command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; command_buffer_inheritance_info.renderPass = m_renderPass; command_buffer_inheritance_info.framebuffer = m_framebuffer; command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info; vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0; color_attachment.clearValue.color.float32[1] = 0; color_attachment.clearValue.color.float32[2] = 0; color_attachment.clearValue.color.float32[3] = 0; 
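// VkClearRect is {rect{offset, extent}, baseArrayLayer, layerCount}; only rect is filled in below and
// the layer fields stay zero-initialized. Because this secondary is recorded with
// RENDER_PASS_CONTINUE_BIT and does not know the render area, the oversized rect is reported when the
// primary executes it inside the render pass -- hence the 00016 VUID is expected at
// vkCmdExecuteCommands rather than at vkCmdClearAttachments record time.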
color_attachment.colorAttachment = 0; // x extent of 257 exceeds render area of 256 VkClearRect clear_rect = {{{0, 0}, {257, 32}}}; vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect); vkEndCommandBuffer(secondary_command_buffer); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016"); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); } TEST_F(VkPositiveLayerTest, SecondaryCommandBufferClearColorAttachments) { TEST_DESCRIPTION("Create a secondary command buffer and record a CmdClearAttachments call into it"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkCommandBufferAllocateInfo command_buffer_allocate_info = {}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = m_commandPool->handle(); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; command_buffer_allocate_info.commandBufferCount = 1; VkCommandBuffer secondary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer)); VkCommandBufferBeginInfo command_buffer_begin_info = {}; VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {}; command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; command_buffer_inheritance_info.renderPass = m_renderPass; command_buffer_inheritance_info.framebuffer = m_framebuffer; command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info; vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0; color_attachment.clearValue.color.float32[1] = 0; color_attachment.clearValue.color.float32[2] = 0; color_attachment.clearValue.color.float32[3] = 0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {32, 32}}}; vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect); vkEndCommandBuffer(secondary_command_buffer); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, SecondaryCommandBufferImageLayoutTransitions) { TEST_DESCRIPTION("Perform an image layout transition in a secondary command buffer followed by a transition in the primary."); VkResult err; m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s Couldn't find depth stencil format.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Allocate a secondary and 
primary cmd buffer VkCommandBufferAllocateInfo command_buffer_allocate_info = {}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = m_commandPool->handle(); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; command_buffer_allocate_info.commandBufferCount = 1; VkCommandBuffer secondary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer)); command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; VkCommandBuffer primary_command_buffer; ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &primary_command_buffer)); VkCommandBufferBeginInfo command_buffer_begin_info = {}; VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {}; command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info; err = vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(secondary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); err = vkEndCommandBuffer(secondary_command_buffer); ASSERT_VK_SUCCESS(err); // Now update primary cmd buffer to execute secondary and transitions image command_buffer_begin_info.pInheritanceInfo = nullptr; err = vkBeginCommandBuffer(primary_command_buffer, &command_buffer_begin_info); ASSERT_VK_SUCCESS(err); vkCmdExecuteCommands(primary_command_buffer, 1, &secondary_command_buffer); VkImageMemoryBarrier img_barrier2 = {}; img_barrier2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier2.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier2.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier2.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier2.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier2.image = image.handle(); img_barrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier2.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; img_barrier2.subresourceRange.baseArrayLayer = 0; img_barrier2.subresourceRange.baseMipLevel = 0; 
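// The interesting part of this positive test: img_barrier2 in the primary declares
// oldLayout = DEPTH_STENCIL_ATTACHMENT_OPTIMAL, which is only consistent if the layers carried the
// UNDEFINED -> DEPTH_STENCIL_ATTACHMENT_OPTIMAL transition recorded in the secondary back into the
// primary's tracked image state at vkCmdExecuteCommands time. No message is expected; the
// VerifyNotFound() below is the actual assertion.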
img_barrier2.subresourceRange.layerCount = 1; img_barrier2.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(primary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier2); err = vkEndCommandBuffer(primary_command_buffer); ASSERT_VK_SUCCESS(err); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &primary_command_buffer; err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); err = vkDeviceWaitIdle(m_device->device()); ASSERT_VK_SUCCESS(err); vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &secondary_command_buffer); vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &primary_command_buffer); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, IgnoreUnrelatedDescriptor) { TEST_DESCRIPTION( "Ensure that the vkUpdateDescriptorSets validation code is ignoring VkWriteDescriptorSet members that are not related to " "the descriptor type specified by VkWriteDescriptorSet::descriptorType. Correct validation behavior will result in the " "test running to completion without validation errors."); const uintptr_t invalid_ptr = 0xcdcdcdcd; ASSERT_NO_FATAL_FAILURE(Init()); // Verify VK_FORMAT_R8_UNORM supports VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT const VkFormat format_texel_case = VK_FORMAT_R8_UNORM; const char *format_texel_case_string = "VK_FORMAT_R8_UNORM"; VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), format_texel_case, &format_properties); if (!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) { printf("%s Test requires %s to support VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT\n", kSkipPrefix, format_texel_case_string); return; } // Image Case { m_errorMonitor->ExpectSuccess(); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkDescriptorImageInfo image_info = {}; image_info.imageView = view; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; descriptor_write.pImageInfo = &image_info; // Set pBufferInfo and pTexelBufferView to invalid values, which should // be // ignored for descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE. // This will most likely produce a crash if the parameter_validation // layer // does not correctly ignore pBufferInfo. 
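// Which VkWriteDescriptorSet array an update reads is keyed off descriptorType:
//     SAMPLER / COMBINED_IMAGE_SAMPLER / SAMPLED_IMAGE / STORAGE_IMAGE / INPUT_ATTACHMENT -> pImageInfo
//     UNIFORM_BUFFER / STORAGE_BUFFER (and the _DYNAMIC variants)                         -> pBufferInfo
//     UNIFORM_TEXEL_BUFFER / STORAGE_TEXEL_BUFFER                                         -> pTexelBufferView
// The other two pointers are ignored, which is exactly what the deliberately bogus values below rely on.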
descriptor_write.pBufferInfo = reinterpret_cast(invalid_ptr); descriptor_write.pTexelBufferView = reinterpret_cast(invalid_ptr); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); } // Buffer Case { m_errorMonitor->ExpectSuccess(); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkDescriptorBufferInfo buffer_info = {}; buffer_info.buffer = buffer; buffer_info.offset = 0; buffer_info.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buffer_info; // Set pImageInfo and pTexelBufferView to invalid values, which should // be // ignored for descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER. // This will most likely produce a crash if the parameter_validation // layer // does not correctly ignore pImageInfo. 
descriptor_write.pImageInfo = reinterpret_cast(invalid_ptr); descriptor_write.pTexelBufferView = reinterpret_cast(invalid_ptr); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); } // Texel Buffer Case { m_errorMonitor->ExpectSuccess(); VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferViewCreateInfo buff_view_ci = {}; buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; buff_view_ci.buffer = buffer; buff_view_ci.format = format_texel_case; buff_view_ci.range = VK_WHOLE_SIZE; VkBufferView buffer_view; err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buffer_view); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &buffer_view; // Set pImageInfo and pBufferInfo to invalid values, which should be // ignored for descriptorType == // VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER. // This will most likely produce a crash if the parameter_validation // layer // does not correctly ignore pImageInfo and pBufferInfo. 
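// For texel buffer descriptors the consumed member is pTexelBufferView, which must reference a
// VkBufferView created over the buffer with an explicit format (format_texel_case above); plain
// uniform/storage buffer descriptors instead supply a VkDescriptorBufferInfo with offset and range.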
descriptor_write.pImageInfo = reinterpret_cast(invalid_ptr); descriptor_write.pBufferInfo = reinterpret_cast(invalid_ptr); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); vkDestroyBufferView(m_device->device(), buffer_view, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); } } TEST_F(VkPositiveLayerTest, ImmutableSamplerOnlyDescriptor) { TEST_DESCRIPTION("Bind a DescriptorSet with only an immutable sampler and make sure that we don't warn for no update."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); m_errorMonitor->VerifyNotFound(); vkDestroySampler(m_device->device(), sampler, NULL); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DuplicateDescriptorBinding) { TEST_DESCRIPTION("Create a descriptor set layout with a duplicate binding number."); ASSERT_NO_FATAL_FAILURE(Init()); // Create layout where two binding #s are "1" static const uint32_t NUM_BINDINGS = 3; VkDescriptorSetLayoutBinding dsl_binding[NUM_BINDINGS] = {}; dsl_binding[0].binding = 1; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[0].descriptorCount = 1; dsl_binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[0].pImmutableSamplers = NULL; dsl_binding[1].binding = 0; dsl_binding[1].descriptorCount = 1; dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[1].descriptorCount = 1; dsl_binding[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[1].pImmutableSamplers = NULL; dsl_binding[2].binding = 1; // Duplicate binding should cause error dsl_binding[2].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[2].descriptorCount = 1; dsl_binding[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding[2].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; ds_layout_ci.bindingCount = NUM_BINDINGS; ds_layout_ci.pBindings = dsl_binding; VkDescriptorSetLayout ds_layout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279"); vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidPushDescriptorSetLayout) { TEST_DESCRIPTION("Create a push descriptor set layout with invalid bindings."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if 
(DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Get the push descriptor limits auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; // Note that as binding is referenced in ds_layout_ci, it is effectively in the closure by reference as well. auto test_create_ds_layout = [&ds_layout_ci, this](std::string error) { VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error); vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); }; // Starting with the initial VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC type set above.. test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00280"); binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; test_create_ds_layout( "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280"); // This is the same VUID as above, just a second error condition. 
if (!(push_descriptor_prop.maxPushDescriptors == std::numeric_limits<uint32_t>::max())) { binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; binding.descriptorCount = push_descriptor_prop.maxPushDescriptors + 1; test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00281"); } else { printf("%s maxPushDescriptors is set to maximum uint32_t value, skipping 'out of range test'.\n", kSkipPrefix); } } TEST_F(VkLayerTest, PushDescriptorSetLayoutWithoutExtension) { TEST_DESCRIPTION("Create a push descriptor set layout without loading the needed extension."); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; std::string error = "Attempted to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR in "; error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension "; error = error + VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME; error = error + " has not been enabled."; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281"); VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, DescriptorIndexingSetLayoutWithoutExtension) { TEST_DESCRIPTION("Create an update_after_bind set layout without loading the needed extension."); ASSERT_NO_FATAL_FAILURE(Init()); auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; std::string error = "Attemped to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT in "; // (string must match the text emitted by the layer) error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension "; error = error + VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME; error = error + " has not been enabled."; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str()); VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, DescriptorIndexingSetLayout) { TEST_DESCRIPTION("Exercise various create/allocate-time errors related to VK_EXT_descriptor_indexing."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 2> required_device_extensions = { {VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } }
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind auto indexing_features = lvl_init_struct(); auto features2 = lvl_init_struct(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); indexing_features.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); std::array flags = {VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT}; auto flags_create_info = lvl_init_struct(); flags_create_info.bindingCount = (uint32_t)flags.size(); flags_create_info.pBindingFlags = flags.data(); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct(&flags_create_info); ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; // VU for VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount flags_create_info.bindingCount = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002"); VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); flags_create_info.bindingCount = 1; // set is missing UPDATE_AFTER_BIND_POOL flag. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000"); // binding uses a feature we disabled m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUniformBufferUpdateAfterBind-03005"); err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; ds_layout_ci.bindingCount = 0; flags_create_info.bindingCount = 0; err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount}; auto dspci = lvl_init_struct(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; VkDescriptorPool pool; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; // mismatch between descriptor set and pool m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); if (indexing_features.descriptorBindingVariableDescriptorCount) { ds_layout_ci.flags = 0; ds_layout_ci.bindingCount = 1; 
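// With VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT on the last binding, the layout's
// descriptorCount becomes an upper bound and the actual count is supplied at allocation time through a
// chained VkDescriptorSetVariableDescriptorCountAllocateInfoEXT. Asking for more than the layout allows
// (variable_count = 2 against descriptorCount = 1 below) is what triggers the 03046 VUID.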
flags_create_info.bindingCount = 1; flags[0] = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT; err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); pool_size = {binding.descriptorType, binding.descriptorCount}; dspci = lvl_init_struct(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto count_alloc_info = lvl_init_struct(); count_alloc_info.descriptorSetCount = 1; // Set variable count larger than what was in the descriptor binding uint32_t variable_count = 2; count_alloc_info.pDescriptorCounts = &variable_count; ds_alloc_info = lvl_init_struct(&count_alloc_info); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); } } TEST_F(VkLayerTest, DescriptorIndexingUpdateAfterBind) { TEST_DESCRIPTION("Exercise errors for updating a descriptor set after it is bound."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE3_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME); } else { printf("%s Descriptor Indexing or Maintenance3 Extension not supported, skipping tests\n", kSkipPrefix); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind auto indexing_features = lvl_init_struct(); auto features2 = lvl_init_struct(&indexing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); indexing_features.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE; if (VK_FALSE == indexing_features.descriptorBindingStorageBufferUpdateAfterBind) { printf("%s Test requires (unsupported) descriptorBindingStorageBufferUpdateAfterBind, skipping\n", kSkipPrefix); return; } if (VK_FALSE == features2.features.fragmentStoresAndAtomics) { printf("%s Test requires (unsupported) fragmentStoresAndAtomics, skipping\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorBindingFlagsEXT flags[2] = {0, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT}; auto flags_create_info = lvl_init_struct(); 
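    // Binding 0 is a plain uniform buffer while binding 1 is a storage buffer flagged UPDATE_AFTER_BIND. Updating
    // binding 1 after the set has been bound must be accepted; updating binding 0 afterwards should only be flagged
    // when the command buffer is ended.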
flags_create_info.bindingCount = 2; flags_create_info.pBindingFlags = &flags[0]; // Descriptor set has two bindings - only the second is update_after_bind VkDescriptorSetLayoutBinding binding[2] = { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }; auto ds_layout_ci = lvl_init_struct(&flags_create_info); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; ds_layout_ci.bindingCount = 2; ds_layout_ci.pBindings = &binding[0]; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); VkDescriptorPoolSize pool_sizes[2] = { {binding[0].descriptorType, binding[0].descriptorCount}, {binding[1].descriptorType, binding[1].descriptorCount}, }; auto dspci = lvl_init_struct(); dspci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT; dspci.poolSizeCount = 2; dspci.pPoolSizes = &pool_sizes[0]; dspci.maxSets = 1; VkDescriptorPool pool; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); ASSERT_VK_SUCCESS(err); VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs); VkMemoryAllocateInfo mem_alloc_info = {}; mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc_info.allocationSize = mem_reqs.size; m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buffInfo[2] = {}; buffInfo[0].buffer = dyub; buffInfo[0].offset = 0; buffInfo[0].range = 1024; VkWriteDescriptorSet descriptor_write[2] = {}; descriptor_write[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write[0].dstSet = ds; descriptor_write[0].dstBinding = 0; descriptor_write[0].descriptorCount = 1; descriptor_write[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write[0].pBufferInfo = buffInfo; descriptor_write[1] = descriptor_write[0]; descriptor_write[1].dstBinding = 1; descriptor_write[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); // Create a dummy pipeline, since VL inspects which bindings are actually used at draw time char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "layout(set=0, 
binding=0) uniform foo0 { float x0; } bar0;\n" "layout(set=0, binding=1) buffer foo1 { float x1; } bar1;\n" "void main(){\n" " color = vec4(bar0.x0 + bar1.x1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.CreateVKPipeline(pipeline_layout, m_renderPass); // Make both bindings valid before binding to the command buffer vkUpdateDescriptorSets(m_device->device(), 2, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); // Two subtests. First only updates the update_after_bind binding and expects // no error. Second updates the other binding and expects an error when the // command buffer is ended. for (uint32_t i = 0; i < 2; ++i) { m_commandBuffer->begin(); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, &ds, 0, NULL); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdDraw(m_commandBuffer->handle(), 0, 0, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); // Valid to update binding 1 after being bound vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[1], 0, NULL); m_errorMonitor->VerifyNotFound(); if (i == 0) { // expect no errors m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } else { // Invalid to update binding 0 after being bound. But the error is actually // generated during vkEndCommandBuffer vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is invalid because bound DescriptorSet"); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); } } vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); vkDestroyBuffer(m_device->handle(), dyub, NULL); vkFreeMemory(m_device->handle(), mem, NULL); vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, NULL); } TEST_F(VkLayerTest, AllocatePushDescriptorSet) { TEST_DESCRIPTION("Attempt to allocate a push descriptor set."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, 
VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount}; auto dspci = lvl_init_struct(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; VkDescriptorPool pool; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, PushDescriptorSetCmdPushBadArgs) { TEST_DESCRIPTION("Attempt to push a push descriptor set with incorrect arguments."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } // Create ordinary and push descriptor set layout VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj ds_layout(m_device, {binding}); ASSERT_TRUE(ds_layout.initialized()); const VkDescriptorSetLayoutObj push_ds_layout(m_device, {binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); ASSERT_TRUE(push_ds_layout.initialized()); // Now use the descriptor set layouts to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout}); ASSERT_TRUE(pipeline_layout.initialized()); // Create a descriptor to push const uint32_t buffer_data[4] = {4, 5, 6, 7}; VkConstantBufferObj buffer_obj(m_device, sizeof(buffer_data), &buffer_data); ASSERT_TRUE(buffer_obj.initialized()); // Create a "write" struct, noting that the buffer_info cannot be a temporary arg (the return from write_descriptor_set // references its data), and the DescriptorSet() can be temporary, because the value is ignored VkDescriptorBufferInfo buffer_info = {buffer_obj.handle(), 0, VK_WHOLE_SIZE}; VkWriteDescriptorSet 
    descriptor_write = vk_testing::Device::write_descriptor_set(vk_testing::DescriptorSet(), 0, 0,
                                                                VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, &buffer_info);

    // Find address of extension call and make the call
    PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
        (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR");
    ASSERT_TRUE(vkCmdPushDescriptorSetKHR != nullptr);

    // Section 1: Queue family matching/capabilities.
    // Create command pool on a non-graphics queue
    const uint32_t no_gfx_qfi = m_device->QueueFamilyMatching(VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT);
    const uint32_t transfer_only_qfi =
        m_device->QueueFamilyMatching(VK_QUEUE_TRANSFER_BIT, (VK_QUEUE_COMPUTE_BIT | VK_QUEUE_GRAPHICS_BIT));
    if ((UINT32_MAX == transfer_only_qfi) && (UINT32_MAX == no_gfx_qfi)) {
        printf("%s No compute or transfer-only queue family, skipping bindpoint and queue tests.\n", kSkipPrefix);
    } else {
        const uint32_t err_qfi = (UINT32_MAX == no_gfx_qfi) ? transfer_only_qfi : no_gfx_qfi;

        VkCommandPoolObj command_pool(m_device, err_qfi);
        ASSERT_TRUE(command_pool.initialized());
        VkCommandBufferObj command_buffer(m_device, &command_pool);
        ASSERT_TRUE(command_buffer.initialized());
        command_buffer.begin();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363");
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00330");
        if (err_qfi == transfer_only_qfi) {
            // A transfer-only queue supports neither the graphics nor the compute bindpoint, so the cmdpool error is
            // expected as well
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
        }
        vkCmdPushDescriptorSetKHR(command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                                  &descriptor_write);
        m_errorMonitor->VerifyFound();
        command_buffer.end();

        // If we succeed in testing only one condition above, we need to test the other below.
        if ((UINT32_MAX != transfer_only_qfi) && (err_qfi != transfer_only_qfi)) {
            // Need to test the neither compute/gfx supported case separately.
VkCommandPoolObj tran_command_pool(m_device, transfer_only_qfi); ASSERT_TRUE(tran_command_pool.initialized()); VkCommandBufferObj tran_command_buffer(m_device, &tran_command_pool); ASSERT_TRUE(tran_command_buffer.initialized()); tran_command_buffer.begin(); // We can't avoid getting *both* errors in this case m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00330"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool"); vkCmdPushDescriptorSetKHR(tran_command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write); m_errorMonitor->VerifyFound(); tran_command_buffer.end(); } } // Push to the non-push binding m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00365"); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1, &descriptor_write); m_errorMonitor->VerifyFound(); // Specify set out of bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00364"); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 2, 1, &descriptor_write); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); // This is a test for VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording // TODO: Add VALIDATION_ERROR_ code support to core_validation::ValidateCmd m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You must call vkBeginCommandBuffer() before this call to vkCmdPushDescriptorSetKHR()"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00330"); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SetDynScissorParamTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetScissor without multiViewport feature"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); const VkRect2D scissor = {{0, 0}, {16, 16}}; const VkRect2D scissors[] = {scissor, scissor}; m_commandBuffer->begin(); // array tests m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 1, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 0, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); 
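    // Without multiViewport, firstScissor must be 0 and scissorCount must be 1; the call below uses firstScissor=1
    // with scissorCount=2, so both VUIDs are expected from a single call.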
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-pScissors-parameter"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, nullptr); m_errorMonitor->VerifyFound(); struct TestCase { VkRect2D scissor; std::string vuid; }; std::vector test_cases = {{{{-1, 0}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"}, {{{0, -1}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"}, {{{1, 0}, {INT32_MAX, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{INT32_MAX, 0}, {1, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{0, 0}, {uint32_t{INT32_MAX} + 1, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{0, 1}, {16, INT32_MAX}}, "VUID-vkCmdSetScissor-offset-00597"}, {{{0, INT32_MAX}, {16, 1}}, "VUID-vkCmdSetScissor-offset-00597"}, {{{0, 0}, {16, uint32_t{INT32_MAX} + 1}}, "VUID-vkCmdSetScissor-offset-00597"}}; for (const auto &test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &test_case.scissor); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } TEST_F(VkLayerTest, SetDynScissorParamMultiviewportTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetScissor with multiViewport feature enabled"); ASSERT_NO_FATAL_FAILURE(Init()); if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } const auto max_scissors = m_device->props.limits.maxViewports; const uint32_t too_many_scissors = 65536 + 1; // let's say this is too much to allocate pScissors for m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-pScissors-parameter"); vkCmdSetScissor(m_commandBuffer->handle(), 0, max_scissors, nullptr); m_errorMonitor->VerifyFound(); if (max_scissors >= too_many_scissors) { printf( "%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping " "part of " "test.\n", kSkipPrefix); return; } const VkRect2D scissor = {{0, 0}, {16, 16}}; const std::vector scissors(max_scissors + 1, scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), 0, max_scissors + 1, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), max_scissors, 1, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), 1, max_scissors, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), max_scissors + 1, 0, scissors.data()); m_errorMonitor->VerifyFound(); } // This is 
a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, EmptyDescriptorUpdateTest) { TEST_DESCRIPTION("Update last descriptor in a set that includes an empty binding"); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); // Create layout with two uniform buffer descriptors w/ empty binding between them OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0 /*!*/, 0, nullptr}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for update VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buff_ci.size = 256; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 512; // one allocation for both buffers mem_alloc.memoryTypeIndex = 0; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } // Make sure allocation is sufficiently large to accommodate buffer requirements if (mem_reqs.size > mem_alloc.allocationSize) { mem_alloc.allocationSize = mem_reqs.size; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // Only update the descriptor at binding 2 VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; buff_info.offset = 0; buff_info.range = VK_WHOLE_SIZE; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = ds.set_; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, MultiplePushDescriptorSets) { TEST_DESCRIPTION("Verify an error message for multiple push descriptor sets."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); auto push_descriptor_prop = 
GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const unsigned int descriptor_set_layout_count = 2; std::vector ds_layouts; for (uint32_t i = 0; i < descriptor_set_layout_count; ++i) { dsl_binding.binding = i; ds_layouts.emplace_back(m_device, std::vector(1, dsl_binding), VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); } const auto &ds_vk_layouts = MakeVkHandles(ds_layouts); VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.pushConstantRangeCount = 0; pipeline_layout_ci.pPushConstantRanges = NULL; pipeline_layout_ci.setLayoutCount = ds_vk_layouts.size(); pipeline_layout_ci.pSetLayouts = ds_vk_layouts.data(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293"); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateDescriptorUpdateTemplate) { TEST_DESCRIPTION("Verify error messages for invalid vkCreateDescriptorUpdateTemplate calls."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Note: Includes workaround for some implementations which incorrectly return 0 maxPushDescriptors if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME) && (GetPushDescriptorProperties(instance(), gpu()).maxPushDescriptors > 0)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME); } else { printf("%s Push Descriptors and Descriptor Update Template Extensions not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj ds_layout_ub1(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj ds_layout_ub_push(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); const VkPipelineLayoutObj pipeline_layout(m_device, {{&ds_layout_ub, &ds_layout_ub1, &ds_layout_ub_push}}); PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR"); 
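    // Both the create and destroy entry points come from VK_KHR_descriptor_update_template, so they are resolved
    // through vkGetDeviceProcAddr rather than being linked directly.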
ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr); PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR"); ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr); VkDescriptorUpdateTemplateEntry entries = {0, 0, 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, sizeof(VkBuffer)}; VkDescriptorUpdateTemplateCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO; create_info.pNext = nullptr; create_info.flags = 0; create_info.descriptorUpdateEntryCount = 1; create_info.pDescriptorUpdateEntries = &entries; auto do_test = [&](std::string err) { VkDescriptorUpdateTemplateKHR dut = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, err); if (VK_SUCCESS == vkCreateDescriptorUpdateTemplateKHR(m_device->handle(), &create_info, nullptr, &dut)) { vkDestroyDescriptorUpdateTemplateKHR(m_device->handle(), dut, nullptr); } m_errorMonitor->VerifyFound(); }; // Descriptor set type template create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET; // descriptorSetLayout is NULL do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350"); // Push descriptor type template create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR; create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; create_info.pipelineLayout = pipeline_layout.handle(); create_info.set = 2; // Bad bindpoint -- force fuzz the bind point memset(&create_info.pipelineBindPoint, 0xFE, sizeof(create_info.pipelineBindPoint)); do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351"); create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; // Bad pipeline layout create_info.pipelineLayout = VK_NULL_HANDLE; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352"); create_info.pipelineLayout = pipeline_layout.handle(); // Wrong set # create_info.set = 0; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); // Invalid set # create_info.set = 42; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); } // This is a positive test. No failures are expected. 
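// For push descriptors the write is consumed immediately by vkCmdPushDescriptorSetKHR, so VkWriteDescriptorSet::dstSet
// is ignored and may legitimately be left as 0 / VK_NULL_HANDLE, as the next test demonstrates.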
TEST_F(VkPositiveLayerTest, PushDescriptorNullDstSetTest) { TEST_DESCRIPTION("Use null dstSet in CmdPushDescriptorSetKHR"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); // Now use the descriptor layout to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); VkDescriptorBufferInfo buff_info; buff_info.buffer = vbo.handle(); buff_info.offset = 0; buff_info.range = sizeof(vbo_data); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = 0; // Should not cause a validation error // Find address of extension call and make the call PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); assert(vkCmdPushDescriptorSetKHR != nullptr); m_commandBuffer->begin(); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No failures are expected. 
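// A pipeline layout may mix a push descriptor set (set 0 here) with a normally allocated set (set 1); pushing set 0
// and binding set 1 before the draw should produce no errors even though set 0 is never bound with
// vkCmdBindDescriptorSets.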
TEST_F(VkPositiveLayerTest, PushDescriptorUnboundSetTest) { TEST_DESCRIPTION("Ensure that no validation errors are produced for not bound push descriptor sets"); VkResult err; if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->ExpectSuccess(); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); // Create descriptor set layout VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); // Create push descriptor set layout const VkDescriptorSetLayoutObj push_ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); // Allocate descriptor set VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.descriptorPool = ds_pool; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout.handle(); VkDescriptorSet descriptor_set; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); // Now use the descriptor layouts to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout}); // Create PSO char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=2) uniform foo1 { float x; } bar1;\n" "layout(set=1) layout(binding=2) uniform foo2 { float y; } bar2;\n" "void main(){\n" " x = vec4(bar1.x) + vec4(bar2.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddShader(&vs); 
pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); static const float bo_data[1] = {1.f}; VkConstantBufferObj buffer(m_device, sizeof(bo_data), (const void *)&bo_data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); // Update descriptor set VkDescriptorBufferInfo buff_info; buff_info.buffer = buffer.handle(); buff_info.offset = 0; buff_info.range = sizeof(bo_data); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = descriptor_set; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); assert(vkCmdPushDescriptorSetKHR != nullptr); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Push descriptors and bind descriptor set vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1, &descriptor_set, 0, NULL); // No errors should be generated. vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyNotFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } // This is a positive test. No failures are expected. 
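// Destroying a buffer releases its memory binding, so re-binding the same VkDeviceMemory to a newly created image must
// not be reported as aliasing; this exercises the tracker's handling of freed bindings.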
TEST_F(VkPositiveLayerTest, TestAliasedMemoryTracking) { VkResult err; bool pass; TEST_DESCRIPTION( "Create a buffer, allocate memory, bind memory, destroy the buffer, create an image, and bind the same memory to it"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer; VkImage image; VkDeviceMemory mem; VkMemoryRequirements mem_reqs; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 256; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; // Ensure memory is big enough for both bindings alloc_info.allocationSize = 0x10000; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); uint8_t *pData; err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); memset(pData, 0xCADECADE, static_cast(mem_reqs.size)); vkUnmapMemory(m_device->device(), mem); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // NOW, destroy the buffer. Obviously, the resource no longer occupies this // memory. In fact, it was never used by the GPU. // Just be sure, wait for idle. vkDestroyBuffer(m_device->device(), buffer, NULL); vkDeviceWaitIdle(m_device->device()); // Use optimal as some platforms report linear support but then fail image creation VkImageTiling image_tiling = VK_IMAGE_TILING_OPTIMAL; VkImageFormatProperties image_format_properties; vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, image_tiling, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &image_format_properties); if (image_format_properties.maxExtent.width == 0) { printf("%s Image format not supported; skipped.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); return; } VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = image_tiling; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; /* Create a mappable image. It will be the texture if linear images are OK * to be textures or it will be the staging image if they are not. 
*/ err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyImage(m_device->device(), image, NULL); return; } // VALIDATION FAILURE: err = vkBindImageMemory(m_device->device(), image, mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyImage(m_device->device(), image, NULL); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, TestDestroyFreeNullHandles) { VkResult err; TEST_DESCRIPTION("Call all applicable destroy and free routines with NULL handles, expecting no validation errors"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); vkDestroyBuffer(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyBufferView(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyCommandPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDescriptorPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDescriptorSetLayout(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDevice(VK_NULL_HANDLE, NULL); vkDestroyEvent(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyFramebuffer(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyImage(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyImageView(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyInstance(VK_NULL_HANDLE, NULL); vkDestroyPipeline(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyPipelineCache(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyPipelineLayout(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyQueryPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyRenderPass(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroySampler(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroySemaphore(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyShaderModule(m_device->device(), VK_NULL_HANDLE, NULL); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffers[3] = {}; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffers[1]); vkFreeCommandBuffers(m_device->device(), command_pool, 3, command_buffers); vkDestroyCommandPool(m_device->device(), command_pool, NULL); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; 
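    // Repeat the array trick with descriptor sets: allocate only element [1] of a three-element array, then free all
    // three and expect the NULL handles at [0] and [2] to be silently ignored, mirroring the command buffer case above.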
VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_sets[3] = {}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_sets[1]); ASSERT_VK_SUCCESS(err); vkFreeDescriptorSets(m_device->device(), ds_pool, 3, descriptor_sets); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); vkFreeMemory(m_device->device(), VK_NULL_HANDLE, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, QueueSubmitSemaphoresAndLayoutTracking) { TEST_DESCRIPTION("Submit multiple command buffers with chained semaphore signals and layout transitions"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBuffer cmd_bufs[4]; VkCommandBufferAllocateInfo alloc_info; alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.commandBufferCount = 4; alloc_info.commandPool = m_commandPool->handle(); alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkCommandBufferBeginInfo cb_binfo; cb_binfo.pNext = NULL; cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cb_binfo.pInheritanceInfo = VK_NULL_HANDLE; cb_binfo.flags = 0; // Use 4 command buffers, each with an image layout transition, ColorAO->General->ColorAO->TransferSrc->TransferDst vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(cmd_bufs[0], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[0]); vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo); 
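    // The remaining command buffers record the rest of the chain (General -> ColorAttachment -> TransferSrc ->
    // TransferDst); correct layout tracking across them depends on the submission order enforced by the semaphores
    // created further below.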
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[1], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[1]); vkBeginCommandBuffer(cmd_bufs[2], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[2], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[2]); vkBeginCommandBuffer(cmd_bufs[3], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[3], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[3]); // Submit 4 command buffers in 3 submits, with submits 2 and 3 waiting for semaphores from submits 1 and 2 VkSemaphore semaphore1, semaphore2; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore1); vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore2); VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info[3]; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].pNext = nullptr; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = &cmd_bufs[0]; submit_info[0].signalSemaphoreCount = 1; submit_info[0].pSignalSemaphores = &semaphore1; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitDstStageMask = nullptr; submit_info[0].pWaitDstStageMask = flags; submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[1].pNext = nullptr; submit_info[1].commandBufferCount = 1; submit_info[1].pCommandBuffers = &cmd_bufs[1]; submit_info[1].waitSemaphoreCount = 1; submit_info[1].pWaitSemaphores = &semaphore1; submit_info[1].signalSemaphoreCount = 1; submit_info[1].pSignalSemaphores = &semaphore2; submit_info[1].pWaitDstStageMask = flags; submit_info[2].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[2].pNext = nullptr; submit_info[2].commandBufferCount = 2; submit_info[2].pCommandBuffers = &cmd_bufs[2]; submit_info[2].waitSemaphoreCount = 1; submit_info[2].pWaitSemaphores = &semaphore2; submit_info[2].signalSemaphoreCount = 0; submit_info[2].pSignalSemaphores = nullptr; submit_info[2].pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 3, submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); vkDestroySemaphore(m_device->device(), semaphore1, NULL); vkDestroySemaphore(m_device->device(), semaphore2, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, DynamicOffsetWithInactiveBinding) { // Create a descriptorSet w/ dynamic descriptors where 1 binding is inactive // We previously had a bug where dynamic offset of inactive bindings was still being used VkResult err; m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {1, 
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); // Create two buffers to update the descriptors with // The first will be 2k and used for bindings 0 & 1, the second is 1k for binding 2 uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 2048; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub1; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub1); ASSERT_VK_SUCCESS(err); // buffer2 buffCI.size = 1024; VkBuffer dyub2; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub2); ASSERT_VK_SUCCESS(err); // Allocate memory and bind to buffers VkMemoryAllocateInfo mem_alloc[2] = {}; mem_alloc[0].sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc[0].pNext = NULL; mem_alloc[0].memoryTypeIndex = 0; mem_alloc[1].sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc[1].pNext = NULL; mem_alloc[1].memoryTypeIndex = 0; VkMemoryRequirements mem_reqs1; vkGetBufferMemoryRequirements(m_device->device(), dyub1, &mem_reqs1); VkMemoryRequirements mem_reqs2; vkGetBufferMemoryRequirements(m_device->device(), dyub2, &mem_reqs2); mem_alloc[0].allocationSize = mem_reqs1.size; bool pass = m_device->phy().set_memory_type(mem_reqs1.memoryTypeBits, &mem_alloc[0], 0); mem_alloc[1].allocationSize = mem_reqs2.size; pass &= m_device->phy().set_memory_type(mem_reqs2.memoryTypeBits, &mem_alloc[1], 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), dyub1, NULL); vkDestroyBuffer(m_device->device(), dyub2, NULL); return; } VkDeviceMemory mem1; err = vkAllocateMemory(m_device->device(), &mem_alloc[0], NULL, &mem1); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub1, mem1, 0); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem2; err = vkAllocateMemory(m_device->device(), &mem_alloc[1], NULL, &mem2); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub2, mem2, 0); ASSERT_VK_SUCCESS(err); // Update descriptors const uint32_t BINDING_COUNT = 3; VkDescriptorBufferInfo buff_info[BINDING_COUNT] = {}; buff_info[0].buffer = dyub1; buff_info[0].offset = 0; buff_info[0].range = 256; buff_info[1].buffer = dyub1; buff_info[1].offset = 256; buff_info[1].range = 512; buff_info[2].buffer = dyub2; buff_info[2].offset = 0; buff_info[2].range = 512; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = BINDING_COUNT; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = buff_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo1 { int x; int y; } bar1;\n" "layout(set=0) layout(binding=2) uniform foo2 { int x; int y; } bar2;\n" "void main(){\n" " x = vec4(bar1.y) + vec4(bar2.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, 
VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.SetViewport(m_viewports);
    pipe.SetScissor(m_scissors);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    // This bind should succeed even though the dynamic offset of inactive binding 1 oversteps the binding 2 buffer size;
    // we used to have a bug in this case.
    uint32_t dyn_off[BINDING_COUNT] = {0, 1024, 256};
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_,
                            BINDING_COUNT, dyn_off);
    m_commandBuffer->Draw(1, 0, 0, 0);
    m_errorMonitor->VerifyNotFound();
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
    vkDestroyBuffer(m_device->device(), dyub1, NULL);
    vkDestroyBuffer(m_device->device(), dyub2, NULL);
    vkFreeMemory(m_device->device(), mem1, NULL);
    vkFreeMemory(m_device->device(), mem2, NULL);
}

TEST_F(VkPositiveLayerTest, NonCoherentMemoryMapping) {
    TEST_DESCRIPTION(
        "Ensure that validation's handling of non-coherent memory mapping while using VK_WHOLE_SIZE does not cause access "
        "violations");
    VkResult err;
    uint8_t *pData;
    ASSERT_NO_FATAL_FAILURE(Init());

    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;
    mem_reqs.memoryTypeBits = 0xFFFFFFFF;
    const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize;
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    static const VkDeviceSize allocation_size = 32 * atom_size;
    alloc_info.allocationSize = allocation_size;

    // Find a memory configuration WITHOUT a COHERENT bit, otherwise exit
    bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
    if (!pass) {
        pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info,
                                               VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                               VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
        if (!pass) {
            pass = m_device->phy().set_memory_type(
                mem_reqs.memoryTypeBits, &alloc_info,
                VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
            if (!pass) {
                printf("%s Couldn't find a memory type without a COHERENT bit.\n", kSkipPrefix);
                return;
            }
        }
    }

    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    // Map/Flush/Invalidate using WHOLE_SIZE and zero offsets and entire mapped range
    m_errorMonitor->ExpectSuccess();
    err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    VkMappedMemoryRange mmr = {};
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = 0;
    mmr.size = VK_WHOLE_SIZE;
    err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();
    vkUnmapMemory(m_device->device(), mem);

    // Map/Flush/Invalidate using WHOLE_SIZE and an offset and entire mapped range
    m_errorMonitor->ExpectSuccess();
    err = vkMapMemory(m_device->device(), mem, 5 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mmr.memory = mem; mmr.offset = 6 * atom_size; mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); // Map with offset and size // Flush/Invalidate subrange of mapped area with offset and size m_errorMonitor->ExpectSuccess(); err = vkMapMemory(m_device->device(), mem, 3 * atom_size, 9 * atom_size, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = 4 * atom_size; mmr.size = 2 * atom_size; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); // Map without offset and flush WHOLE_SIZE with two separate offsets m_errorMonitor->ExpectSuccess(); err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = allocation_size - (4 * atom_size); mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); mmr.offset = allocation_size - (6 * atom_size); mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); vkFreeMemory(m_device->device(), mem, NULL); } // This is a positive test. We used to expect error in this case but spec now allows it TEST_F(VkPositiveLayerTest, ResetUnsignaledFence) { m_errorMonitor->ExpectSuccess(); vk_testing::Fence testFence; VkFenceCreateInfo fenceInfo = {}; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; ASSERT_NO_FATAL_FAILURE(Init()); testFence.init(*m_device, fenceInfo); VkFence fences[1] = {testFence.handle()}; VkResult result = vkResetFences(m_device->device(), 1, fences); ASSERT_VK_SUCCESS(result); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CommandBufferSimultaneousUseSync) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkResult err; // Record (empty!) command buffer that can be submitted multiple times // simultaneously. VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, nullptr}; m_commandBuffer->begin(&cbbi); m_commandBuffer->end(); VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; VkFence fence; err = vkCreateFence(m_device->device(), &fci, nullptr, &fence); ASSERT_VK_SUCCESS(err); VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore s1, s2; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s1); ASSERT_VK_SUCCESS(err); err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s2); ASSERT_VK_SUCCESS(err); // Submit CB once signaling s1, with fence so we can roll forward to its retirement. VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &m_commandBuffer->handle(), 1, &s1}; err = vkQueueSubmit(m_device->m_queue, 1, &si, fence); ASSERT_VK_SUCCESS(err); // Submit CB again, signaling s2. 
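    // No fence this time; only the signal semaphore changes from s1 to s2.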
si.pSignalSemaphores = &s2;
    err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE);
    ASSERT_VK_SUCCESS(err);

    // Wait for fence.
    err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
    ASSERT_VK_SUCCESS(err);

    // CB is still in flight from the second submission, but semaphore s1 is no longer in flight. Delete it.
    vkDestroySemaphore(m_device->device(), s1, nullptr);
    m_errorMonitor->VerifyNotFound();

    // Force device idle and clean up remaining objects
    vkDeviceWaitIdle(m_device->device());
    vkDestroySemaphore(m_device->device(), s2, nullptr);
    vkDestroyFence(m_device->device(), fence, nullptr);
}

TEST_F(VkPositiveLayerTest, FenceCreateSignaledWaitHandling) {
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    VkResult err;

    // A fence created signaled
    VkFenceCreateInfo fci1 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, VK_FENCE_CREATE_SIGNALED_BIT};
    VkFence f1;
    err = vkCreateFence(m_device->device(), &fci1, nullptr, &f1);
    ASSERT_VK_SUCCESS(err);

    // A fence created unsignaled
    VkFenceCreateInfo fci2 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
    VkFence f2;
    err = vkCreateFence(m_device->device(), &fci2, nullptr, &f2);
    ASSERT_VK_SUCCESS(err);

    // Submit the unsignaled fence
    VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 0, nullptr, 0, nullptr};
    err = vkQueueSubmit(m_device->m_queue, 1, &si, f2);

    // Wait on both fences, with signaled first.
    VkFence fences[] = {f1, f2};
    vkWaitForFences(m_device->device(), 2, fences, VK_TRUE, UINT64_MAX);

    // Should have both retired!
    vkDestroyFence(m_device->device(), f1, nullptr);
    vkDestroyFence(m_device->device(), f2, nullptr);

    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkPositiveLayerTest, CreateImageViewFollowsParameterCompatibilityRequirements) {
    TEST_DESCRIPTION("Verify that creating an ImageView with valid usage does not generate validation errors.");

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();

    VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                 nullptr,
                                 VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
                                 VK_IMAGE_TYPE_2D,
                                 VK_FORMAT_R8G8B8A8_UNORM,
                                 {128, 128, 1},
                                 1,
                                 1,
                                 VK_SAMPLE_COUNT_1_BIT,
                                 VK_IMAGE_TILING_OPTIMAL,
                                 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                 VK_SHARING_MODE_EXCLUSIVE,
                                 0,
                                 nullptr,
                                 VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image(m_device);
    image.init(&imgInfo);
    ASSERT_TRUE(image.initialized());

    VkImageView imageView;
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyNotFound();
    vkDestroyImageView(m_device->device(), imageView, NULL);
}

TEST_F(VkPositiveLayerTest, ValidUsage) {
    TEST_DESCRIPTION("Verify that creating an image view from an image with valid usage doesn't generate validation errors");

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    // Verify that we can create a view with usage INPUT_ATTACHMENT
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    VkImageView imageView;
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image =
image.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8G8B8A8_UNORM; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, BindSparse) { TEST_DESCRIPTION("Bind 2 memory ranges to one image using vkQueueBindSparse, destroy the image and then free the memory"); ASSERT_NO_FATAL_FAILURE(Init()); auto index = m_device->graphics_queue_node_index_; if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) { printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix); return; } if (!m_device->phy().features().sparseBinding) { printf("%s Device does not support sparse bindings.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); VkImage image; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory memory_one, memory_two; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Find an image big enough to allow sparse mapping of 2 memory regions // Increase the image size until it is at least twice the // size of the required alignment, to ensure we can bind both // allocated memory blocks to the image on aligned offsets. 
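    // (Each retry doubles the image's width and height and re-queries its requirements; the two allocations below are each
    // exactly one alignment in size and are bound at resource offsets 0 and allocationSize.)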
while (memory_reqs.size < (memory_reqs.alignment * 2)) { vkDestroyImage(m_device->device(), image, nullptr); image_create_info.extent.width *= 2; image_create_info.extent.height *= 2; err = vkCreateImage(m_device->device(), &image_create_info, nullptr, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); } // Allocate 2 memory regions of minimum alignment size, bind one at 0, the other // at the end of the first memory_info.allocationSize = memory_reqs.alignment; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_one); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_two); ASSERT_VK_SUCCESS(err); VkSparseMemoryBind binds[2]; binds[0].flags = 0; binds[0].memory = memory_one; binds[0].memoryOffset = 0; binds[0].resourceOffset = 0; binds[0].size = memory_info.allocationSize; binds[1].flags = 0; binds[1].memory = memory_two; binds[1].memoryOffset = 0; binds[1].resourceOffset = memory_info.allocationSize; binds[1].size = memory_info.allocationSize; VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo; opaqueBindInfo.image = image; opaqueBindInfo.bindCount = 2; opaqueBindInfo.pBinds = binds; VkFence fence = VK_NULL_HANDLE; VkBindSparseInfo bindSparseInfo = {}; bindSparseInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO; bindSparseInfo.imageOpaqueBindCount = 1; bindSparseInfo.pImageOpaqueBinds = &opaqueBindInfo; vkQueueBindSparse(m_device->m_queue, 1, &bindSparseInfo, fence); vkQueueWaitIdle(m_device->m_queue); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), memory_one, NULL); vkFreeMemory(m_device->device(), memory_two, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, BindSparseMetadata) { TEST_DESCRIPTION("Bind memory for the metadata aspect of a sparse image"); ASSERT_NO_FATAL_FAILURE(Init()); auto index = m_device->graphics_queue_node_index_; if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) { printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix); return; } if (!m_device->phy().features().sparseResidencyImage2D) { printf("%s Device does not support sparse residency for images.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a sparse image VkImage image; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Query image memory requirements VkMemoryRequirements memory_reqs; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Query sparse memory requirements uint32_t sparse_reqs_count = 0; vkGetImageSparseMemoryRequirements(m_device->device(), 
image, &sparse_reqs_count, nullptr); std::vector sparse_reqs(sparse_reqs_count); vkGetImageSparseMemoryRequirements(m_device->device(), image, &sparse_reqs_count, sparse_reqs.data()); // Find requirements for metadata aspect const VkSparseImageMemoryRequirements *metadata_reqs = nullptr; for (auto const &aspect_sparse_reqs : sparse_reqs) { if (aspect_sparse_reqs.formatProperties.aspectMask == VK_IMAGE_ASPECT_METADATA_BIT) { metadata_reqs = &aspect_sparse_reqs; } } if (!metadata_reqs) { printf("%s Sparse image does not require memory for metadata.\n", kSkipPrefix); } else { // Allocate memory for the metadata VkDeviceMemory metadata_memory = VK_NULL_HANDLE; VkMemoryAllocateInfo metadata_memory_info = {}; metadata_memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; metadata_memory_info.allocationSize = metadata_reqs->imageMipTailSize; m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &metadata_memory_info, 0); err = vkAllocateMemory(m_device->device(), &metadata_memory_info, NULL, &metadata_memory); ASSERT_VK_SUCCESS(err); // Bind metadata VkSparseMemoryBind sparse_bind = {}; sparse_bind.resourceOffset = metadata_reqs->imageMipTailOffset; sparse_bind.size = metadata_reqs->imageMipTailSize; sparse_bind.memory = metadata_memory; sparse_bind.memoryOffset = 0; sparse_bind.flags = VK_SPARSE_MEMORY_BIND_METADATA_BIT; VkSparseImageOpaqueMemoryBindInfo opaque_bind_info = {}; opaque_bind_info.image = image; opaque_bind_info.bindCount = 1; opaque_bind_info.pBinds = &sparse_bind; VkBindSparseInfo bind_info = {}; bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO; bind_info.imageOpaqueBindCount = 1; bind_info.pImageOpaqueBinds = &opaque_bind_info; vkQueueBindSparse(m_device->m_queue, 1, &bind_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); // Cleanup vkQueueWaitIdle(m_device->m_queue); vkFreeMemory(m_device->device(), metadata_memory, NULL); } vkDestroyImage(m_device->device(), image, NULL); } TEST_F(VkPositiveLayerTest, FramebufferBindingDestroyCommandPool) { TEST_DESCRIPTION( "This test should pass. Create a Framebuffer and command buffer, bind them together, then destroy command pool and " "framebuffer and verify there are no errors."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. 
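    // (a single 32x32 R8G8B8A8_UNORM color-attachment view, matching the render pass attachment created above)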
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Explicitly create a command buffer to bind the FB to so that we can then // destroy the command pool in order to implicitly free command buffer VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); // Begin our cmd buffer with renderpass using our framebuffer VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdBeginRenderPass(command_buffer, &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(command_buffer); vkEndCommandBuffer(command_buffer); // Destroy command pool to implicitly free command buffer vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, FramebufferCreateDepthStencilLayoutTransitionForDepthOnlyImageView) { TEST_DESCRIPTION( "Validate that when an imageView of a depth/stencil image is used as a depth/stencil framebuffer attachment, the " "aspectMask is ignored and both depth and stencil image subresources are used."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &format_properties); if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) { printf("%s Image format does not support sampling.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkAttachmentDescription attachment = {0, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr}; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep}; VkResult err; VkRenderPass rp; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_D32_SFLOAT_S8_UINT, 0x26, // usage VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); image.SetLayout(0x6, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_D32_SFLOAT_S8_UINT, {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A}, {0x2, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageMemoryBarrier imb = {}; imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; imb.pNext = nullptr; imb.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; imb.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; imb.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; imb.srcQueueFamilyIndex = 0; imb.dstQueueFamilyIndex = 0; imb.image = image.handle(); imb.subresourceRange.aspectMask = 0x6; imb.subresourceRange.baseMipLevel = 0; imb.subresourceRange.levelCount = 0x1; imb.subresourceRange.baseArrayLayer = 0; imb.subresourceRange.layerCount = 0x1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &imb); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, BarrierLayoutToImageUsage) { TEST_DESCRIPTION("Ensure barriers' new and old VkImageLayout are compatible with their images' VkImageUsageFlags"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; { VkImageObj img_color(m_device); img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_color.initialized()); VkImageObj img_ds1(m_device); img_ds1.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds1.initialized()); VkImageObj img_ds2(m_device); img_ds2.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds2.initialized()); VkImageObj img_xfer_src(m_device); img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_src.initialized()); VkImageObj img_xfer_dst(m_device); img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_dst.initialized()); VkImageObj img_sampled(m_device); img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_sampled.initialized()); VkImageObj img_input(m_device); img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_input.initialized()); const struct { VkImageObj &image_obj; VkImageLayout old_layout; VkImageLayout new_layout; } buffer_layouts[] = { // clang-format off {img_color, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_ds1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_ds2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_sampled, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_input, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, // clang-format on }; const uint32_t layout_count = sizeof(buffer_layouts) / sizeof(buffer_layouts[0]); m_commandBuffer->begin(); for (uint32_t i = 0; i < layout_count; ++i) { img_barrier.image = buffer_layouts[i].image_obj.handle(); const VkImageUsageFlags usage = buffer_layouts[i].image_obj.usage(); img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ? 
(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) : VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.oldLayout = buffer_layouts[i].old_layout; img_barrier.newLayout = buffer_layouts[i].new_layout; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); img_barrier.oldLayout = buffer_layouts[i].new_layout; img_barrier.newLayout = buffer_layouts[i].old_layout; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); } m_commandBuffer->end(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; } m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, WaitEventThenSet) { TEST_DESCRIPTION("Wait on a event then set it after the wait has been submitted."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdWaitEvents(command_buffer, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 0, nullptr); vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); vkEndCommandBuffer(command_buffer); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { vkSetEvent(m_device->device(), event); } vkQueueWaitIdle(queue); vkDestroyEvent(m_device->device(), event, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. 
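// In the next test, a secondary command buffer resets the query pool and writes a timestamp; the primary command buffer
// executes the secondary and then copies the query result into a buffer.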
TEST_F(VkPositiveLayerTest, QueryAndCopySecondaryCommandBuffers) { TEST_DESCRIPTION("Issue a query on a secondary command buffer and copy it on a primary."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info{}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); VkCommandPoolObj command_pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj primary_buffer(m_device, &command_pool); VkCommandBufferObj secondary_buffer(m_device, &command_pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); uint32_t qfi = 0; VkBufferCreateInfo buff_create_info = {}; buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_create_info.size = 1024; buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buff_create_info.queueFamilyIndexCount = 1; buff_create_info.pQueueFamilyIndices = &qfi; VkResult err; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkCommandBufferInheritanceInfo hinfo = {}; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; hinfo.renderPass = VK_NULL_HANDLE; hinfo.subpass = 0; hinfo.framebuffer = VK_NULL_HANDLE; hinfo.occlusionQueryEnable = VK_FALSE; hinfo.queryFlags = 0; hinfo.pipelineStatistics = 0; { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; begin_info.pInheritanceInfo = &hinfo; secondary_buffer.begin(&begin_info); vkCmdResetQueryPool(secondary_buffer.handle(), query_pool, 0, 1); vkCmdWriteTimestamp(secondary_buffer.handle(), VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0); secondary_buffer.end(); primary_buffer.begin(); vkCmdExecuteCommands(primary_buffer.handle(), 1, &secondary_buffer.handle()); vkCmdCopyQueryPoolResults(primary_buffer.handle(), query_pool, 0, 1, buffer, 0, 0, 0); primary_buffer.end(); } primary_buffer.QueueCommandBuffer(); vkQueueWaitIdle(queue); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. 
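// In the next test, the timestamp write and the query-result copy are recorded in two separate primary command buffers that
// are submitted together in a single batch.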
TEST_F(VkPositiveLayerTest, QueryAndCopyMultipleCommandBuffers) { TEST_DESCRIPTION("Issue a query and copy from it on a second command buffer."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info{}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); uint32_t qfi = 0; VkBufferCreateInfo buff_create_info = {}; buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_create_info.size = 1024; buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buff_create_info.queueFamilyIndexCount = 1; buff_create_info.pQueueFamilyIndices = &qfi; VkResult err; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdResetQueryPool(command_buffer[0], query_pool, 0, 1); vkCmdWriteTimestamp(command_buffer[0], VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0); vkEndCommandBuffer(command_buffer[0]); vkBeginCommandBuffer(command_buffer[1], &begin_info); vkCmdCopyQueryPoolResults(command_buffer[1], query_pool, 0, 1, buffer, 0, 0, 0); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 2; submit_info.pCommandBuffers = command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, 
VK_NULL_HANDLE); } vkQueueWaitIdle(queue); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, ResetEventThenSet) { TEST_DESCRIPTION("Reset an event then set it after the reset has been submitted."); ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); vkEndCommandBuffer(command_buffer); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is already in use by a command buffer."); vkSetEvent(m_device->device(), event); m_errorMonitor->VerifyFound(); } vkQueueWaitIdle(queue); vkDestroyEvent(m_device->device(), event, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoFencesThreeFrames) { TEST_DESCRIPTION( "Two command buffers with two separate fences are each run through a Submit & WaitForFences cycle 3 times. 
This previously " "revealed a bug so running this positive test to prevent a regression."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); static const uint32_t NUM_OBJECTS = 2; static const uint32_t NUM_FRAMES = 3; VkCommandBuffer cmd_buffers[NUM_OBJECTS] = {}; VkFence fences[NUM_OBJECTS] = {}; VkCommandPool cmd_pool; VkCommandPoolCreateInfo cmd_pool_ci = {}; cmd_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_ci.queueFamilyIndex = m_device->graphics_queue_node_index_; cmd_pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; VkResult err = vkCreateCommandPool(m_device->device(), &cmd_pool_ci, nullptr, &cmd_pool); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmd_buf_info.commandPool = cmd_pool; cmd_buf_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmd_buf_info.commandBufferCount = 1; VkFenceCreateInfo fence_ci = {}; fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fence_ci.pNext = nullptr; fence_ci.flags = 0; for (uint32_t i = 0; i < NUM_OBJECTS; ++i) { err = vkAllocateCommandBuffers(m_device->device(), &cmd_buf_info, &cmd_buffers[i]); ASSERT_VK_SUCCESS(err); err = vkCreateFence(m_device->device(), &fence_ci, nullptr, &fences[i]); ASSERT_VK_SUCCESS(err); } for (uint32_t frame = 0; frame < NUM_FRAMES; ++frame) { for (uint32_t obj = 0; obj < NUM_OBJECTS; ++obj) { // Create empty cmd buffer VkCommandBufferBeginInfo cmdBufBeginDesc = {}; cmdBufBeginDesc.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; err = vkBeginCommandBuffer(cmd_buffers[obj], &cmdBufBeginDesc); ASSERT_VK_SUCCESS(err); err = vkEndCommandBuffer(cmd_buffers[obj]); ASSERT_VK_SUCCESS(err); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buffers[obj]; // Submit cmd buffer and wait for fence err = vkQueueSubmit(queue, 1, &submit_info, fences[obj]); ASSERT_VK_SUCCESS(err); err = vkWaitForFences(m_device->device(), 1, &fences[obj], VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); err = vkResetFences(m_device->device(), 1, &fences[obj]); ASSERT_VK_SUCCESS(err); } } m_errorMonitor->VerifyNotFound(); vkDestroyCommandPool(m_device->device(), cmd_pool, NULL); for (uint32_t i = 0; i < NUM_OBJECTS; ++i) { vkDestroyFence(m_device->device(), fences[i], nullptr); } } // This is a positive test. No errors should be generated. 
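// In the next test, the first command buffer is submitted to a second queue and signals a semaphore; the second command
// buffer is submitted to the default queue waiting on that semaphore, and a QueueWaitIdle on the default queue retires both
// submissions.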
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWI) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues followed by a QueueWaitIdle."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } vkQueueWaitIdle(m_device->m_queue); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), 
command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWIFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed " "by a QueueWaitIdle."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = 
&semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkQueueWaitIdle(m_device->m_queue); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceTwoWFF) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed " "by two consecutive WaitForFences calls on the same fence."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; 
submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, TwoQueuesEnsureCorrectRetirementWithWorkStolen) { ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Test requires two queues, skipping\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); VkQueue q0 = m_device->m_queue; VkQueue q1 = nullptr; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &q1); ASSERT_NE(q1, nullptr); // An (empty) command buffer. We must have work in the first submission -- // the layer treats unfenced work differently from fenced work. VkCommandPoolCreateInfo cpci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, 0, 0}; VkCommandPool pool; err = vkCreateCommandPool(m_device->device(), &cpci, nullptr, &pool); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cbai = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, nullptr, pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1}; VkCommandBuffer cb; err = vkAllocateCommandBuffers(m_device->device(), &cbai, &cb); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr}; err = vkBeginCommandBuffer(cb, &cbbi); ASSERT_VK_SUCCESS(err); err = vkEndCommandBuffer(cb); ASSERT_VK_SUCCESS(err); // A semaphore VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore s; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s); ASSERT_VK_SUCCESS(err); // First submission, to q0 VkSubmitInfo s0 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &cb, 1, &s}; err = vkQueueSubmit(q0, 1, &s0, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Second submission, to q1, waiting on s VkFlags waitmask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // doesn't really matter what this value is. VkSubmitInfo s1 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &s, &waitmask, 0, nullptr, 0, nullptr}; err = vkQueueSubmit(q1, 1, &s1, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Wait for q0 idle err = vkQueueWaitIdle(q0); ASSERT_VK_SUCCESS(err); // Command buffer should have been completed (it was on q0); reset the pool. vkFreeCommandBuffers(m_device->device(), pool, 1, &cb); m_errorMonitor->VerifyNotFound(); // Force device completely idle and clean up resources vkDeviceWaitIdle(m_device->device()); vkDestroyCommandPool(m_device->device(), pool, nullptr); vkDestroySemaphore(m_device->device(), s, nullptr); } // This is a positive test. No errors should be generated. 
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence, " "followed by a WaitForFences call."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } 
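    // A single wait on the fence is sufficient here: the semaphore orders the first queue's batch
    // before the fenced batch, so the fence signaling implies the unfenced submission has retired too.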
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueWithSemaphoreAndOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, sharing a signal/wait semaphore, the second " "having a fence, followed by a WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; 
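        // pWaitDstStageMask (set just below) names the pipeline stages that must block on the
        // semaphore; these tests use VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, which is deliberately conservative.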
submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueNullQueueSubmitWithFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, no fences, followed by a third QueueSubmit " "with NO SubmitInfos but with a fence, followed by a WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = VK_NULL_HANDLE; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = VK_NULL_HANDLE; 
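        // waitSemaphoreCount is 0 here, so no entries of pWaitSemaphores/pWaitDstStageMask are read;
        // they are filled in anyway to keep the structure of these tests uniform.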
submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } vkQueueSubmit(m_device->m_queue, 0, NULL, fence); VkResult err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, the second having a fence, followed by a " "WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = VK_NULL_HANDLE; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = VK_NULL_HANDLE; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, 
fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoSubmitInfosWithSemaphoreOneQueueSubmitsOneFence) { TEST_DESCRIPTION( "Two command buffers each in a separate SubmitInfo sent in a single QueueSubmit call followed by a WaitForFences call."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info[2]; VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].pNext = NULL; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = &command_buffer[0]; submit_info[0].signalSemaphoreCount = 1; submit_info[0].pSignalSemaphores = &semaphore; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = 0; submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[1].pNext = NULL; submit_info[1].commandBufferCount = 1; submit_info[1].pCommandBuffers = &command_buffer[1]; submit_info[1].waitSemaphoreCount = 1; 
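        // Both batches are handed to the queue in the single vkQueueSubmit below; the fence passed
        // to that call signals only after every batch in the submission has completed execution.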
submit_info[1].pWaitSemaphores = &semaphore; submit_info[1].pWaitDstStageMask = flags; submit_info[1].signalSemaphoreCount = 0; submit_info[1].pSignalSemaphores = NULL; vkQueueSubmit(m_device->m_queue, 2, &submit_info[0], fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribMatrixType) { TEST_DESCRIPTION("Test that pipeline validation accepts matrices passed as vertex attributes"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[2]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 2; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in mat2x4 x;\n" "void main(){\n" " gl_Position = x[0] + x[1];\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 2); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); /* expect success */ m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribArrayType) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[2]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 2; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x[2];\n" "void main(){\n" " gl_Position = x[0] + x[1];\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 2); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribComponents) { TEST_DESCRIPTION( "Test that pipeline validation accepts consuming a vertex attribute through multiple vertex 
shader inputs, each consuming " "a different subset of the components, and that fragment shader-attachment validation tolerates multiple duplicate " "location outputs"); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[3]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 3; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x;\n" "layout(location=1) in vec3 y1;\n" "layout(location=1, component=3) in float y2;\n" "layout(location=2) in vec4 z;\n" "void main(){\n" " gl_Position = x + vec4(y1, y2) + z;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0, component=0) out float color0;\n" "layout(location=0, component=1) out float color1;\n" "layout(location=0, component=2) out float color2;\n" "layout(location=0, component=3) out float color3;\n" "layout(location=1, component=0) out vec2 second_color0;\n" "layout(location=1, component=2) out vec2 second_color1;\n" "void main(){\n" " color0 = float(1);\n" " second_color0 = vec2(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); // Create a renderPass with two color attachments VkAttachmentReference attachments[2] = {}; attachments[0].layout = VK_IMAGE_LAYOUT_GENERAL; attachments[1].attachment = 1; attachments[1].layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = attachments; subpass.colorAttachmentCount = 2; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 2; VkAttachmentDescription attach_desc[2] = {}; attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc[1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; rpci.pAttachments = attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass renderpass; vkCreateRenderPass(m_device->device(), &rpci, NULL, &renderpass); pipe.AddShader(&vs); pipe.AddShader(&fs); VkPipelineColorBlendAttachmentState att_state1 = {}; att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state1.blendEnable = VK_FALSE; pipe.AddColorAttachment(0, att_state1); pipe.AddColorAttachment(1, att_state1); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 3); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass); vkDestroyRenderPass(m_device->device(), renderpass, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineSimplePositive) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); 
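    // Baseline positive case: a minimal vertex + fragment pipeline against the framework's default
    // render target, with no vertex input and only the dummy descriptor set.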
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineRelaxedTypeMatch) { TEST_DESCRIPTION( "Test that pipeline validation accepts the relaxed type matching rules set out in 14.1.3: fundamental type must match, and " "producer side must have at least as many components"); m_errorMonitor->ExpectSuccess(); // VK 1.0.8 Specification, 14.1.3 "Additionally,..." block ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "layout(location=0) out vec3 x;\n" "layout(location=1) out ivec3 y;\n" "layout(location=2) out vec3 z;\n" "void main(){\n" " gl_Position = vec4(0);\n" " x = vec3(0); y = ivec3(0); z = vec3(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "layout(location=0) in float x;\n" "layout(location=1) flat in int y;\n" "layout(location=2) in vec2 z;\n" "void main(){\n" " color = vec4(1 + x + y + z.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = VK_SUCCESS; err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineTessPerVertex) { TEST_DESCRIPTION("Test that pipeline validation accepts per-vertex variables passed between the TCS and TES stages"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(location=0) out int x[];\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" " x[gl_InvocationID] = gl_InvocationID;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "layout(location=0) in int x[];\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = x[0] + x[1] + x[2];\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, 
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineGeometryInputBlockPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts a user-defined interface block passed into the geometry shader. This is interesting " "because the 'extra' array level is not present on the member type, but on the block instance."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().geometryShader) { printf("%s Device does not support geometry shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "layout(location=0) out VertexData { vec4 x; } vs_out;\n" "void main(){\n" " vs_out.x = vec4(1);\n" "}\n"; char const *gsSource = "#version 450\n" "layout(triangles) in;\n" "layout(triangle_strip, max_vertices=3) out;\n" "layout(location=0) in VertexData { vec4 x; } gs_in[];\n" "void main() {\n" " gl_Position = gs_in[0].x;\n" " EmitVertex();\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&gs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipeline64BitAttributesPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts basic use of 64bit vertex attributes. 
This is interesting because they consume " "multiple locations."); m_errorMonitor->ExpectSuccess(); if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().shaderFloat64) { printf("%s Device does not support 64bit vertex attributes; skipped.\n", kSkipPrefix); return; } // Set 64bit format to support VTX Buffer feature PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { return; } VkFormatProperties format_props; fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, &format_props); format_props.bufferFeatures |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, format_props); VkVertexInputBindingDescription input_bindings[1]; memset(input_bindings, 0, sizeof(input_bindings)); VkVertexInputAttributeDescription input_attribs[4]; memset(input_attribs, 0, sizeof(input_attribs)); input_attribs[0].location = 0; input_attribs[0].offset = 0; input_attribs[0].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[1].location = 2; input_attribs[1].offset = 32; input_attribs[1].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[2].location = 4; input_attribs[2].offset = 64; input_attribs[2].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[3].location = 6; input_attribs[3].offset = 96; input_attribs[3].format = VK_FORMAT_R64G64B64A64_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in dmat4 x;\n" "void main(){\n" " gl_Position = vec4(x[0][0]);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings, 1); pipe.AddVertexInputAttribs(input_attribs, 4); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineInputAttachmentPositive) { TEST_DESCRIPTION("Positive test for a correctly matched input attachment"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); 
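    // The render pass created below declares attachment 1 as its only input attachment, so the
    // shader's subpassInput at input_attachment_index=0 resolves to it; the descriptor set layout
    // exposes the matching INPUT_ATTACHMENT binding at set=0, binding=0.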
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); VkAttachmentDescription descs[2] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference color = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }; VkAttachmentReference input = { 1, VK_IMAGE_LAYOUT_GENERAL, }; VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // should be OK. would go wrong here if it's going to... pipe.CreateVKPipeline(pl.handle(), rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, CreateComputePipelineMissingDescriptorUnusedPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts a compute pipeline which declares a descriptor-backed resource which is not " "provided, but the shader does not statically use it. This is interesting because it requires compute pipelines to have a " "proper descriptor use walk, which they didn't for some time."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main(){\n" " // x is not used.\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, descriptorSet.GetPipelineLayout(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsSampler) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming only the sampler portion of a combined image + sampler"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" 
"layout(set=0, binding=0) uniform sampler s;\n" "layout(set=0, binding=1) uniform texture2D t;\n" "layout(set=0, binding=2) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsImage) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming only the image portion of a combined image + sampler"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform texture2D t;\n" "layout(set=0, binding=1) uniform sampler s;\n" "layout(set=0, binding=2) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsBoth) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming both the sampler and the image of a combined image+sampler but " "via separate variables"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform texture2D t;\n" "layout(set=0, binding=0) uniform sampler s; // both binding 0!\n" "layout(set=0, binding=1) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, 
pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateDescriptorSetBindingWithIgnoredSamplers) { TEST_DESCRIPTION("Test that layers conditionally do ignore the pImmutableSamplers on vkCreateDescriptorSetLayout"); bool prop2_found = false; if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); prop2_found = true; } else { printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool push_descriptor_found = false; if (prop2_found && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); // In addition to the extension being supported we need to have at least one available // Some implementations report an invalid maxPushDescriptors of 0 push_descriptor_found = GetPushDescriptorProperties(instance(), gpu()).maxPushDescriptors > 0; } else { printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD; const uint64_t fake_address_32 = 0xCDCDCDCD; const void *fake_pointer = sizeof(void *) == 8 ? reinterpret_cast(fake_address_64) : reinterpret_cast(fake_address_32); const VkSampler *hopefully_undereferencable_pointer = reinterpret_cast(fake_pointer); // regular descriptors m_errorMonitor->ExpectSuccess(); { const VkDescriptorSetLayoutBinding non_sampler_bindings[] = { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {6, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {7, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {8, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, }; const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, 0, static_cast(size(non_sampler_bindings)), non_sampler_bindings}; VkDescriptorSetLayout dsl; const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl); ASSERT_VK_SUCCESS(err); vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr); } m_errorMonitor->VerifyNotFound(); if (push_descriptor_found) { // push descriptors m_errorMonitor->ExpectSuccess(); { const VkDescriptorSetLayoutBinding 
non_sampler_bindings[] = { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {6, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, }; const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, static_cast(size(non_sampler_bindings)), non_sampler_bindings}; VkDescriptorSetLayout dsl; const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl); ASSERT_VK_SUCCESS(err); vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr); } m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, Maintenance1Tests) { TEST_DESCRIPTION("Validate various special cases for the Maintenance1_KHR extension"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s Maintenance1 Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); VkCommandBufferObj cmd_buf(m_device, m_commandPool); cmd_buf.begin(); // Set Negative height, should give error if Maintenance 1 is not enabled VkViewport viewport = {0, 0, 16, -16, 0, 1}; vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport); cmd_buf.end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, DuplicateValidPNextStructures) { TEST_DESCRIPTION("Create a pNext chain containing valid structures, but with a duplicate structure type"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME); } else { printf("%s VK_NV_dedicated_allocation extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Create two pNext structures which by themselves would be valid VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {}; VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info_2 = {}; dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info.pNext = &dedicated_buffer_create_info_2; dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE; dedicated_buffer_create_info_2.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info_2.pNext = nullptr; dedicated_buffer_create_info_2.dedicatedAllocation = VK_TRUE; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.pNext = &dedicated_buffer_create_info; buffer_create_info.size = 1024; 
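    // Each VkDedicatedAllocationBufferCreateInfoNV is valid on its own; chaining two of them onto
    // the same VkBufferCreateInfo is what should trigger the duplicate-structure-type check below.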
    buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buffer_create_info.queueFamilyIndexCount = 1;
    buffer_create_info.pQueueFamilyIndices = &queue_family_index;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "chain contains duplicate structure types");
    VkBuffer buffer;
    vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DedicatedAllocation) {
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    } else {
        printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    VkMemoryPropertyFlags mem_flags = 0;
    const VkDeviceSize resource_size = 1024;
    auto buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    VkBufferObj buffer;
    buffer.init_no_mem(*m_device, buffer_info);

    auto buffer_alloc_info =
        vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), mem_flags);
    auto buffer_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>();
    buffer_dedicated_info.buffer = buffer.handle();
    buffer_alloc_info.pNext = &buffer_dedicated_info;
    vk_testing::DeviceMemory dedicated_buffer_memory;
    dedicated_buffer_memory.init(*m_device, buffer_alloc_info);

    VkBufferObj wrong_buffer;
    wrong_buffer.init_no_mem(*m_device, buffer_info);

    // Bind with wrong buffer
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01508");
    vkBindBufferMemory(m_device->handle(), wrong_buffer.handle(), dedicated_buffer_memory.handle(), 0);
    m_errorMonitor->VerifyFound();

    // Bind with non-zero offset (same VUID)
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkBindBufferMemory-memory-01508");  // offset must be zero
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkBindBufferMemory-size-01037");  // offset pushes us past size
    auto offset = buffer.memory_requirements().alignment;
    vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), offset);
    m_errorMonitor->VerifyFound();

    // Bind correctly (depends on the "skip" above)
    m_errorMonitor->ExpectSuccess();
    vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), 0);
    m_errorMonitor->VerifyNotFound();

    // And for images...
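    // The image half mirrors the buffer checks above: the dedicated allocation is tied to a single
    // image, so binding a different image, or binding at a non-zero offset, is expected to fail with
    // the corresponding vkBindImageMemory VUIDs.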
    vk_testing::Image image;
    vk_testing::Image wrong_image;
    auto image_info = vk_testing::Image::create_info();
    image_info.extent.width = resource_size;
    image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image.init_no_mem(*m_device, image_info);
    wrong_image.init_no_mem(*m_device, image_info);

    auto image_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>();
    image_dedicated_info.image = image.handle();
    auto image_alloc_info =
        vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), mem_flags);
    image_alloc_info.pNext = &image_dedicated_info;
    vk_testing::DeviceMemory dedicated_image_memory;
    dedicated_image_memory.init(*m_device, image_alloc_info);

    // Bind with wrong image
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01509");
    vkBindImageMemory(m_device->handle(), wrong_image.handle(), dedicated_image_memory.handle(), 0);
    m_errorMonitor->VerifyFound();

    // Bind with non-zero offset (same VUID)
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkBindImageMemory-memory-01509");  // offset must be zero
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkBindImageMemory-size-01049");  // offset pushes us past size
    auto image_offset = image.memory_requirements().alignment;
    vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), image_offset);
    m_errorMonitor->VerifyFound();

    // Bind correctly (depends on the "skip" above)
    m_errorMonitor->ExpectSuccess();
    vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), 0);
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkPositiveLayerTest, ValidStructPNext) {
    TEST_DESCRIPTION("Verify that a valid pNext value is handled correctly");
    // Positive test to check parameter_validation and unique_objects support for NV_dedicated_allocation
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME);
    } else {
        printf("%s VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME Extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->ExpectSuccess();

    VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {};
    dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV;
    dedicated_buffer_create_info.pNext = nullptr;
    dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE;

    uint32_t queue_family_index = 0;
    VkBufferCreateInfo buffer_create_info = {};
    buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_create_info.pNext = &dedicated_buffer_create_info;
    buffer_create_info.size = 1024;
    buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buffer_create_info.queueFamilyIndexCount = 1;
    buffer_create_info.pQueueFamilyIndices = &queue_family_index;

    VkBuffer buffer;
    VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    VkMemoryRequirements memory_reqs;
    vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);

    VkDedicatedAllocationMemoryAllocateInfoNV dedicated_memory_info = {};
    dedicated_memory_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV;
    dedicated_memory_info.pNext = nullptr;
    dedicated_memory_info.buffer = buffer;
    dedicated_memory_info.image = VK_NULL_HANDLE;
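    // The NV dedicated-allocation chain targets exactly one resource: buffer is set and image is
    // left VK_NULL_HANDLE, and the allocate info below carries this struct on its pNext.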
    VkMemoryAllocateInfo memory_info = {};
    memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_info.pNext = &dedicated_memory_info;
    memory_info.allocationSize = memory_reqs.size;

    bool pass;
    pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
    ASSERT_TRUE(pass);

    VkDeviceMemory buffer_memory;
    err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
    ASSERT_VK_SUCCESS(err);

    err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
    ASSERT_VK_SUCCESS(err);

    vkDestroyBuffer(m_device->device(), buffer, NULL);
    vkFreeMemory(m_device->device(), buffer_memory, NULL);

    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkPositiveLayerTest, PSOPolygonModeValid) {
    TEST_DESCRIPTION("Verify that using a solid polygon fill mode works correctly.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    std::vector<const char *> device_extension_names;
    auto features = m_device->phy().features();
    // Artificially disable support for non-solid fill modes
    features.fillModeNonSolid = false;
    // The sacrificial device object
    VkDeviceObj test_device(0, gpu(), device_extension_names, &features);

    VkRenderpassObj render_pass(&test_device);
    const VkPipelineLayoutObj pipeline_layout(&test_device);

    VkPipelineRasterizationStateCreateInfo rs_ci = {};
    rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rs_ci.pNext = nullptr;
    rs_ci.lineWidth = 1.0f;
    rs_ci.rasterizerDiscardEnable = false;

    VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    // Set polygonMode=FILL. No error is expected
    m_errorMonitor->ExpectSuccess();
    {
        VkPipelineObj pipe(&test_device);
        pipe.AddShader(&vs);
        pipe.AddShader(&fs);
        pipe.AddDefaultColorAttachment();

        // Set polygonMode to a good value
        rs_ci.polygonMode = VK_POLYGON_MODE_FILL;
        pipe.SetRasterization(&rs_ci);
        pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
    }
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkPositiveLayerTest, LongSemaphoreChain) {
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    VkResult err;

    std::vector<VkSemaphore> semaphores;

    const int chainLength = 32768;
    VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;

    for (int i = 0; i < chainLength; i++) {
        VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0};
        VkSemaphore semaphore;
        err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &semaphore);
        ASSERT_VK_SUCCESS(err);

        semaphores.push_back(semaphore);

        VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, semaphores.size() > 1 ? 1u : 0u, semaphores.size() > 1 ?
&semaphores[semaphores.size() - 2] : nullptr, &flags, 0, nullptr, 1, &semaphores[semaphores.size() - 1]}; err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); } VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; VkFence fence; err = vkCreateFence(m_device->device(), &fci, nullptr, &fence); ASSERT_VK_SUCCESS(err); VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &semaphores.back(), &flags, 0, nullptr, 0, nullptr}; err = vkQueueSubmit(m_device->m_queue, 1, &si, fence); ASSERT_VK_SUCCESS(err); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); for (auto semaphore : semaphores) vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkDestroyFence(m_device->device(), fence, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ExternalSemaphore) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR; #else const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external semaphore instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external semaphore device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external semaphore import and export capability VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr, handle_type}; VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp); if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) || !(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); // Create a semaphore to export payload from VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type}; VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0}; VkSemaphore export_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore); ASSERT_VK_SUCCESS(err); // Create a semaphore to import payload into sci.pNext = nullptr; VkSemaphore import_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore); 
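// The platform-specific block below performs the opaque-handle round trip: the payload of export_semaphore is exported with vkGetSemaphore{Win32Handle,Fd}KHR and then imported into import_semaphore with vkImportSemaphore{Win32Handle,Fd}KHR, so signaling one semaphore can satisfy a wait on the other in the queue submissions that follow.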
ASSERT_VK_SUCCESS(err); #ifdef _WIN32 // Export semaphore payload to an opaque handle HANDLE handle = nullptr; VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR"); err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above VkImportSemaphoreWin32HandleInfoKHR ihi = { VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, nullptr, import_semaphore, 0, handle_type, handle, nullptr}; auto vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR"); err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #else // Export semaphore payload to an opaque handle int fd = 0; VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR"); err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above VkImportSemaphoreFdInfoKHR ihi = { VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore, 0, handle_type, fd}; auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR"); err = vkImportSemaphoreFdKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #endif // Signal the exported semaphore and wait on the imported semaphore VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo si[] = { {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, }; err = vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); if (m_device->phy().features().sparseBinding) { // Signal the imported semaphore and wait on the exported semaphore VkBindSparseInfo bi[] = { {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, }; err = vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); } // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroySemaphore(m_device->device(), export_semaphore, nullptr); vkDestroySemaphore(m_device->device(), import_semaphore, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ExternalFence) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto extension_name = 
VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external fence instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external fence device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external fence import and export capability VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type}; VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR"); vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp); if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) || !(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); // Create a fence to export payload from VkFence export_fence; { VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type}; VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence); ASSERT_VK_SUCCESS(err); } // Create a fence to import payload into VkFence import_fence; { VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence); ASSERT_VK_SUCCESS(err); } #ifdef _WIN32 // Export fence payload to an opaque handle HANDLE handle = nullptr; { VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR"); err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceWin32HandleInfoKHR ifi = { VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, nullptr, import_fence, 0, handle_type, handle, nullptr}; auto vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR"); err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #else // Export fence payload to an opaque handle int fd = 0; { VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR"); err = 
vkGetFenceFdKHR(m_device->device(), &gfi, &fd); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence, 0, handle_type, fd}; auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR"); err = vkImportFenceFdKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #endif // Signal the exported fence and wait on the imported fence vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence); vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &import_fence); vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence); vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &import_fence); // Signal the imported fence and wait on the exported fence vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &export_fence); vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &export_fence); // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), export_fence, nullptr); vkDestroyFence(m_device->device(), import_fence, nullptr); m_errorMonitor->VerifyNotFound(); } extern "C" void *ReleaseNullFence(void *arg) { struct thread_data_struct *data = (struct thread_data_struct *)arg; for (int i = 0; i < 40000; i++) { vkDestroyFence(data->device, VK_NULL_HANDLE, NULL); if (data->bailout) { break; } } return NULL; } TEST_F(VkPositiveLayerTest, ThreadNullFenceCollision) { test_platform_thread thread; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR"); ASSERT_NO_FATAL_FAILURE(Init()); struct thread_data_struct data; data.device = m_device->device(); data.bailout = false; m_errorMonitor->SetBailout(&data.bailout); // Call vkDestroyFence of VK_NULL_HANDLE repeatedly using multiple threads. // There should be no validation error from collision of that non-object. 
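// Rationale: the threading layer flags concurrent use of the same object from multiple threads, but VK_NULL_HANDLE is not an object and must not be tracked. The worker thread running ReleaseNullFence (above) and the loop below deliberately issue the identical call so that any spurious per-object locking would surface as a "THREADING ERROR" message.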
test_platform_thread_create(&thread, ReleaseNullFence, (void *)&data); for (int i = 0; i < 40000; i++) { vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL); } test_platform_thread_join(thread, NULL); m_errorMonitor->SetBailout(NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ClearColorImageWithValidRange) { TEST_DESCRIPTION("Record clear color with a valid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try good case { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyNotFound(); } // Try good case with VK_REMAINING { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, ClearDepthStencilWithValidRange) { TEST_DESCRIPTION("Record clear depth with a valid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearDepthStencilValue clear_value = {}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try good case { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyNotFound(); } // Try good case with VK_REMAINING { m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {ds_aspect, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, CreateGraphicsPipelineWithIgnoredPointers) { TEST_DESCRIPTION("Create Graphics Pipeline with pointers that must be ignored by layers"); ASSERT_NO_FATAL_FAILURE(Init()); m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu()); ASSERT_TRUE(m_depth_stencil_fmt != 0); m_depthStencil->Init(m_device, static_cast(m_width), static_cast(m_height), m_depth_stencil_fmt); ASSERT_NO_FATAL_FAILURE(InitRenderTarget(m_depthStencil->BindInfo())); const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD; const uint64_t fake_address_32 = 0xCDCDCDCD; void *hopefully_undereferencable_pointer = sizeof(void *) == 8 ? reinterpret_cast(fake_address_64) : reinterpret_cast(fake_address_32); VkShaderObj vs(m_device, "#version 450\nvoid main(){gl_Position = vec4(0.0, 0.0, 0.0, 1.0);}\n", VK_SHADER_STAGE_VERTEX_BIT, this); const VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 0, nullptr, // bindings 0, nullptr // attributes }; const VkPipelineInputAssemblyStateCreateInfo pipeline_input_assembly_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, VK_FALSE // primitive restart }; const VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info_template{ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_FALSE, // depthClamp VK_FALSE, // rasterizerDiscardEnable VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_COUNTER_CLOCKWISE, VK_FALSE, // depthBias 0.0f, 0.0f, 0.0f, // depthBias params 1.0f // lineWidth }; VkPipelineLayout pipeline_layout; { VkPipelineLayoutCreateInfo pipeline_layout_create_info{ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, // pNext 0, // flags 0, nullptr, // layouts 0, nullptr // push constants }; VkResult err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_create_info, nullptr, &pipeline_layout); ASSERT_VK_SUCCESS(err); } // try disabled rasterizer and no tessellation { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_TRUE; VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{ 
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, reinterpret_cast(hopefully_undereferencable_pointer), reinterpret_cast(hopefully_undereferencable_pointer), &pipeline_rasterization_state_create_info, reinterpret_cast(hopefully_undereferencable_pointer), reinterpret_cast(hopefully_undereferencable_pointer), reinterpret_cast(hopefully_undereferencable_pointer), nullptr, // dynamic states pipeline_layout, m_renderPass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); } const VkPipelineMultisampleStateCreateInfo pipeline_multisample_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_SAMPLE_COUNT_1_BIT, VK_FALSE, // sample shading 0.0f, // minSampleShading nullptr, // pSampleMask VK_FALSE, // alphaToCoverageEnable VK_FALSE // alphaToOneEnable }; // try enabled rasterizer but no subpass attachments { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE; VkViewport viewport = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {static_cast(m_width), static_cast(m_height)}}; const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 1, &viewport, 1, &scissor}; VkRenderPass render_pass; { VkSubpassDescription subpass_desc = {}; VkRenderPassCreateInfo render_pass_create_info{ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, // pNext 0, // flags 0, nullptr, // attachments 1, &subpass_desc, 0, nullptr // subpass dependencies }; VkResult err = vkCreateRenderPass(m_device->handle(), &render_pass_create_info, nullptr, &render_pass); ASSERT_VK_SUCCESS(err); } VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, nullptr, &pipeline_viewport_state_create_info, &pipeline_rasterization_state_create_info, &pipeline_multisample_state_create_info, reinterpret_cast(hopefully_undereferencable_pointer), reinterpret_cast(hopefully_undereferencable_pointer), nullptr, // dynamic states pipeline_layout, render_pass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); vkDestroyRenderPass(m_device->handle(), render_pass, nullptr); } // try dynamic viewport and scissor { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE; const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 1, 
reinterpret_cast(hopefully_undereferencable_pointer), 1, reinterpret_cast(hopefully_undereferencable_pointer)}; const VkPipelineDepthStencilStateCreateInfo pipeline_depth_stencil_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, nullptr, // pNext 0, // flags }; const VkPipelineColorBlendAttachmentState pipeline_color_blend_attachment_state = {}; const VkPipelineColorBlendStateCreateInfo pipeline_color_blend_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_FALSE, VK_LOGIC_OP_CLEAR, 1, &pipeline_color_blend_attachment_state, {0.0f, 0.0f, 0.0f, 0.0f}}; const VkDynamicState dynamic_states[2] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; const VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 2, dynamic_states}; VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, nullptr, &pipeline_viewport_state_create_info, &pipeline_rasterization_state_create_info, &pipeline_multisample_state_create_info, &pipeline_depth_stencil_state_create_info, &pipeline_color_blend_state_create_info, &pipeline_dynamic_state_create_info, // dynamic states pipeline_layout, m_renderPass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); } vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, nullptr); } TEST_F(VkPositiveLayerTest, ExternalMemory) { TEST_DESCRIPTION("Perform a copy through a pair of buffers linked by external memory"); #ifdef _WIN32 const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external memory instance extensions std::vector reqd_instance_extensions = { {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}}; for (auto extension_name : reqd_instance_extensions) { if (InstanceExtensionSupported(extension_name)) { m_instance_extension_names.push_back(extension_name); } else { printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name); return; } } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for import/export capability VkPhysicalDeviceExternalBufferInfoKHR ebi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR, nullptr, 0, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, handle_type}; VkExternalBufferPropertiesKHR ebp = {VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR, nullptr, {0, 0, 0}}; auto vkGetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalBufferPropertiesKHR"); ASSERT_TRUE(vkGetPhysicalDeviceExternalBufferPropertiesKHR != nullptr); vkGetPhysicalDeviceExternalBufferPropertiesKHR(gpu(), &ebi, &ebp); if 
(!(ebp.externalMemoryProperties.compatibleHandleTypes & handle_type) || !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) || !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External buffer does not support importing and exporting, skipping test\n", kSkipPrefix); return; } // Check if dedicated allocation is required bool dedicated_allocation = ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR; if (dedicated_allocation) { if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix); return; } } // Check for external memory device extensions if (DeviceExtensionSupported(gpu(), nullptr, ext_mem_extension_name)) { m_device_extension_names.push_back(ext_mem_extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); } else { printf("%s External memory extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); VkMemoryPropertyFlags mem_flags = 0; const VkDeviceSize buffer_size = 1024; // Create export and import buffers const VkExternalMemoryBufferCreateInfoKHR external_buffer_info = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, nullptr, handle_type}; auto buffer_info = VkBufferObj::create_info(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT); buffer_info.pNext = &external_buffer_info; VkBufferObj buffer_export; buffer_export.init_no_mem(*m_device, buffer_info); VkBufferObj buffer_import; buffer_import.init_no_mem(*m_device, buffer_info); // Allocation info auto alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_export.memory_requirements(), mem_flags); // Add export allocation info to pNext chain VkExportMemoryAllocateInfoKHR export_info = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, nullptr, handle_type}; alloc_info.pNext = &export_info; // Add dedicated allocation info to pNext chain if required VkMemoryDedicatedAllocateInfoKHR dedicated_info = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, nullptr, VK_NULL_HANDLE, buffer_export.handle()}; if (dedicated_allocation) { export_info.pNext = &dedicated_info; } // Allocate memory to be exported vk_testing::DeviceMemory memory_export; memory_export.init(*m_device, alloc_info); // Bind exported memory buffer_export.bind_memory(memory_export, 0); #ifdef _WIN32 // Export memory to handle auto vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(instance(), "vkGetMemoryWin32HandleKHR"); ASSERT_TRUE(vkGetMemoryWin32HandleKHR != nullptr); VkMemoryGetWin32HandleInfoKHR mghi = {VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, nullptr, memory_export.handle(), handle_type}; HANDLE handle; ASSERT_VK_SUCCESS(vkGetMemoryWin32HandleKHR(m_device->device(), &mghi, &handle)); VkImportMemoryWin32HandleInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, nullptr, handle_type, handle}; #else // Export memory to fd auto vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(instance(), 
"vkGetMemoryFdKHR"); ASSERT_TRUE(vkGetMemoryFdKHR != nullptr); VkMemoryGetFdInfoKHR mgfi = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, nullptr, memory_export.handle(), handle_type}; int fd; ASSERT_VK_SUCCESS(vkGetMemoryFdKHR(m_device->device(), &mgfi, &fd)); VkImportMemoryFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, nullptr, handle_type, fd}; #endif // Import memory alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_import.memory_requirements(), mem_flags); alloc_info.pNext = &import_info; vk_testing::DeviceMemory memory_import; memory_import.init(*m_device, alloc_info); // Bind imported memory buffer_import.bind_memory(memory_import, 0); // Create test buffers and fill input buffer VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; VkBufferObj buffer_input; buffer_input.init_as_src_and_dst(*m_device, buffer_size, mem_prop); auto input_mem = (uint8_t *)buffer_input.memory().map(); for (uint32_t i = 0; i < buffer_size; i++) { input_mem[i] = (i & 0xFF); } buffer_input.memory().unmap(); VkBufferObj buffer_output; buffer_output.init_as_src_and_dst(*m_device, buffer_size, mem_prop); // Copy from input buffer to output buffer through the exported/imported memory m_commandBuffer->begin(); VkBufferCopy copy_info = {0, 0, buffer_size}; vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_input.handle(), buffer_export.handle(), 1, ©_info); // Insert memory barrier to guarantee copy order VkMemoryBarrier mem_barrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT}; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &mem_barrier, 0, nullptr, 0, nullptr); vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_import.handle(), buffer_output.handle(), 1, ©_info); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, AMDMixedAttachmentSamplesValidateGraphicsPipeline) { TEST_DESCRIPTION("Verify an error message for an incorrect graphics pipeline rasterization sample count."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkRenderpassObj render_pass(m_device); const VkPipelineLayoutObj pipeline_layout(m_device); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set a mismatched sample count VkPipelineMultisampleStateCreateInfo ms_state_ci = {}; ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&ms_state_ci); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505"); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, ParameterLayerFeatures2Capture) { TEST_DESCRIPTION("Ensure parameter_validation_layer correctly captures physical device 
features"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); VkResult err; m_errorMonitor->ExpectSuccess(); VkPhysicalDeviceFeatures2KHR features2; features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR; features2.pNext = nullptr; vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); // We're not creating a valid m_device, but the phy wrapper is useful vk_testing::PhysicalDevice physical_device(gpu()); vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties()); // Only request creation with queuefamilies that have at least one queue std::vector create_queue_infos; auto qci = queue_info.data(); for (uint32_t i = 0; i < queue_info.size(); ++i) { if (qci[i].queueCount) { create_queue_infos.push_back(qci[i]); } } VkDeviceCreateInfo dev_info = {}; dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; dev_info.pNext = &features2; dev_info.flags = 0; dev_info.queueCreateInfoCount = create_queue_infos.size(); dev_info.pQueueCreateInfos = create_queue_infos.data(); dev_info.enabledLayerCount = 0; dev_info.ppEnabledLayerNames = nullptr; dev_info.enabledExtensionCount = 0; dev_info.ppEnabledExtensionNames = nullptr; dev_info.pEnabledFeatures = nullptr; VkDevice device; err = vkCreateDevice(gpu(), &dev_info, nullptr, &device); ASSERT_VK_SUCCESS(err); if (features2.features.samplerAnisotropy) { // Test that the parameter layer is caching the features correctly using CreateSampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); // If the features were not captured correctly, this should cause an error sampler_ci.anisotropyEnable = VK_TRUE; sampler_ci.maxAnisotropy = physical_device.properties().limits.maxSamplerAnisotropy; VkSampler sampler = VK_NULL_HANDLE; err = vkCreateSampler(device, &sampler_ci, nullptr, &sampler); ASSERT_VK_SUCCESS(err); vkDestroySampler(device, sampler, nullptr); } else { printf("%s Feature samplerAnisotropy not enabled; parameter_layer check skipped.\n", kSkipPrefix); } // Verify the core validation layer has captured the physical device features by creating a a query pool. 
if (features2.features.pipelineStatisticsQuery) { VkQueryPool query_pool; VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; qpci.queryCount = 1; err = vkCreateQueryPool(device, &qpci, nullptr, &query_pool); ASSERT_VK_SUCCESS(err); vkDestroyQueryPool(device, query_pool, nullptr); } else { printf("%s Feature pipelineStatisticsQuery not enabled; core_validation_layer check skipped.\n", kSkipPrefix); } vkDestroyDevice(device, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, GetMemoryRequirements2) { TEST_DESCRIPTION( "Get memory requirements with VK_KHR_get_memory_requirements2 instead of core entry points and verify layers do not emit " "errors when objects are bound and used"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for the VK_KHR_get_memory_requirements2 extension if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a test buffer VkBufferObj buffer; buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT)); // Use extension to get buffer memory requirements auto vkGetBufferMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetBufferMemoryRequirements2KHR>( vkGetDeviceProcAddr(m_device->device(), "vkGetBufferMemoryRequirements2KHR")); ASSERT_TRUE(vkGetBufferMemoryRequirements2KHR != nullptr); VkBufferMemoryRequirementsInfo2KHR buffer_info = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr, buffer.handle()}; VkMemoryRequirements2KHR buffer_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; vkGetBufferMemoryRequirements2KHR(m_device->device(), &buffer_info, &buffer_reqs); // Allocate and bind buffer memory vk_testing::DeviceMemory buffer_memory; buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_reqs.memoryRequirements, 0)); vkBindBufferMemory(m_device->device(), buffer.handle(), buffer_memory.handle(), 0); // Create a test image auto image_ci = vk_testing::Image::create_info(); image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.format = VK_FORMAT_R8G8B8A8_UNORM; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image image; image.init_no_mem(*m_device, image_ci); // Use extension to get image memory requirements auto vkGetImageMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetImageMemoryRequirements2KHR>( vkGetDeviceProcAddr(m_device->device(), "vkGetImageMemoryRequirements2KHR")); ASSERT_TRUE(vkGetImageMemoryRequirements2KHR != nullptr); VkImageMemoryRequirementsInfo2KHR image_info = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr, image.handle()}; VkMemoryRequirements2KHR image_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; vkGetImageMemoryRequirements2KHR(m_device->device(), &image_info, &image_reqs); // Allocate and bind image memory vk_testing::DeviceMemory image_memory; image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image_reqs.memoryRequirements, 0)); vkBindImageMemory(m_device->device(), image.handle(), 
image_memory.handle(), 0); // Now execute arbitrary commands that use the test buffer and image m_commandBuffer->begin(); // Fill buffer with 0 vkCmdFillBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_WHOLE_SIZE, 0); // Transition and clear image const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT); const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, subresource_range); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); const VkClearColorValue color = {}; vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range); // Submit and verify no validation errors m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, BindMemory2) { TEST_DESCRIPTION( "Bind memory with VK_KHR_bind_memory2 instead of core entry points and verify layers do not emit errors when objects are " "used"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for the VK_KHR_bind_memory2 extension if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a test buffer VkBufferObj buffer; buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_DST_BIT)); // Allocate buffer memory vk_testing::DeviceMemory buffer_memory; buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), 0)); // Bind buffer memory with extension auto vkBindBufferMemory2KHR = reinterpret_cast<PFN_vkBindBufferMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindBufferMemory2KHR")); ASSERT_TRUE(vkBindBufferMemory2KHR != nullptr); VkBindBufferMemoryInfoKHR buffer_bind_info = {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, nullptr, buffer.handle(), buffer_memory.handle(), 0}; vkBindBufferMemory2KHR(m_device->device(), 1, &buffer_bind_info); // Create a test image auto image_ci = vk_testing::Image::create_info(); image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.format = VK_FORMAT_R8G8B8A8_UNORM; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image image; image.init_no_mem(*m_device, image_ci); // Allocate image memory vk_testing::DeviceMemory image_memory; image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), 0)); // Bind image memory with extension auto vkBindImageMemory2KHR = reinterpret_cast<PFN_vkBindImageMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindImageMemory2KHR")); ASSERT_TRUE(vkBindImageMemory2KHR != nullptr); VkBindImageMemoryInfoKHR image_bind_info = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, nullptr, image.handle(), image_memory.handle(), 0}; vkBindImageMemory2KHR(m_device->device(), 1, &image_bind_info); // Now execute arbitrary commands that use the test buffer and image m_commandBuffer->begin(); // Fill buffer with 0 vkCmdFillBuffer(m_commandBuffer->handle(), 
buffer.handle(), 0, VK_WHOLE_SIZE, 0); // Transition and clear image const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT); const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, subresource_range); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); const VkClearColorValue color = {}; vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range); // Submit and verify no validation errors m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, MultiplaneImageTests) { TEST_DESCRIPTION("Positive test of multiplane image operations"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR; // All planes of equal extent ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify format VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image; ASSERT_VK_SUCCESS(vkCreateImage(device(), &ci, NULL, &image)); // Allocate & bind memory VkPhysicalDeviceMemoryProperties phys_mem_props; vkGetPhysicalDeviceMemoryProperties(gpu(), &phys_mem_props); VkMemoryRequirements mem_reqs; vkGetImageMemoryRequirements(device(), image, &mem_reqs); VkDeviceMemory mem_obj = VK_NULL_HANDLE; VkMemoryPropertyFlagBits mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; for (uint32_t type = 0; type < phys_mem_props.memoryTypeCount; type++) { if ((mem_reqs.memoryTypeBits & (1 << type)) && ((phys_mem_props.memoryTypes[type].propertyFlags & mem_props) == mem_props)) { VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; alloc_info.memoryTypeIndex = type; ASSERT_VK_SUCCESS(vkAllocateMemory(device(), &alloc_info, NULL, &mem_obj)); break; } } if (VK_NULL_HANDLE == mem_obj) { printf("%s Unable to allocate image memory. Skipping test.\n", kSkipPrefix); vkDestroyImage(device(), image, NULL); return; } ASSERT_VK_SUCCESS(vkBindImageMemory(device(), image, mem_obj, 0)); // Copy plane 0 to plane 2 VkImageCopy copyRegion = {}; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {0, 0, 0}; copyRegion.extent.width = 128; copyRegion.extent.height = 128; copyRegion.extent.depth = 1; m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); m_commandBuffer->CopyImage(image, VK_IMAGE_LAYOUT_GENERAL, image, VK_IMAGE_LAYOUT_GENERAL, 1, ©Region); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); vkFreeMemory(device(), mem_obj, NULL); vkDestroyImage(device(), image, NULL); // Repeat bind test on a DISJOINT multi-planar image, with per-plane memory objects, using API2 variants // features |= VK_FORMAT_FEATURE_DISJOINT_BIT; ci.flags = VK_IMAGE_CREATE_DISJOINT_BIT; if (ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features)) { ASSERT_VK_SUCCESS(vkCreateImage(device(), &ci, NULL, &image)); // Allocate & bind memory VkPhysicalDeviceMemoryProperties2 phys_mem_props2 = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2}; vkGetPhysicalDeviceMemoryProperties2(gpu(), &phys_mem_props2); VkImagePlaneMemoryRequirementsInfo image_plane_req = {VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO}; VkImageMemoryRequirementsInfo2 mem_req_info2 = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2}; mem_req_info2.pNext = &image_plane_req; mem_req_info2.image = image; VkMemoryRequirements2 mem_reqs2 = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; VkDeviceMemory p0_mem, p1_mem, p2_mem; mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}; // Plane 0 image_plane_req.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT; vkGetImageMemoryRequirements2(device(), &mem_req_info2, &mem_reqs2); uint32_t mem_type = 0; for (mem_type = 0; mem_type < phys_mem_props2.memoryProperties.memoryTypeCount; mem_type++) { if ((mem_reqs2.memoryRequirements.memoryTypeBits & (1 << mem_type)) && 
((phys_mem_props2.memoryProperties.memoryTypes[mem_type].propertyFlags & mem_props) == mem_props)) { alloc_info.memoryTypeIndex = mem_type; break; } } alloc_info.allocationSize = mem_reqs2.memoryRequirements.size; ASSERT_VK_SUCCESS(vkAllocateMemory(device(), &alloc_info, NULL, &p0_mem)); // Plane 1 & 2 use same memory type image_plane_req.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT; vkGetImageMemoryRequirements2(device(), &mem_req_info2, &mem_reqs2); alloc_info.allocationSize = mem_reqs2.memoryRequirements.size; ASSERT_VK_SUCCESS(vkAllocateMemory(device(), &alloc_info, NULL, &p1_mem)); image_plane_req.planeAspect = VK_IMAGE_ASPECT_PLANE_2_BIT; vkGetImageMemoryRequirements2(device(), &mem_req_info2, &mem_reqs2); alloc_info.allocationSize = mem_reqs2.memoryRequirements.size; ASSERT_VK_SUCCESS(vkAllocateMemory(device(), &alloc_info, NULL, &p2_mem)); // Set up 3-plane binding VkBindImageMemoryInfo bind_info[3]; for (int plane = 0; plane < 3; plane++) { bind_info[plane].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO; bind_info[plane].pNext = nullptr; bind_info[plane].image = image; bind_info[plane].memoryOffset = 0; } bind_info[0].memory = p0_mem; bind_info[1].memory = p1_mem; bind_info[2].memory = p2_mem; m_errorMonitor->ExpectSuccess(); vkBindImageMemory2(device(), 3, bind_info); m_errorMonitor->VerifyNotFound(); vkFreeMemory(device(), p0_mem, NULL); vkFreeMemory(device(), p1_mem, NULL); vkFreeMemory(device(), p2_mem, NULL); vkDestroyImage(device(), image, NULL); } // Test that changing the layout of ASPECT_COLOR also changes the layout of the individual planes VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_src(*m_device, (VkDeviceSize)128 * 128 * 3, reqs); VkImageObj mpimage(m_device); mpimage.Init(256, 256, 1, VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkBufferImageCopy copy_region = {}; copy_region.bufferRowLength = 128; copy_region.bufferImageHeight = 128; copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; copy_region.imageSubresource.layerCount = 1; copy_region.imageExtent.height = 64; copy_region.imageExtent.width = 64; copy_region.imageExtent.depth = 1; vkResetCommandBuffer(m_commandBuffer->handle(), 0); m_commandBuffer->begin(); mpimage.ImageMemoryBarrier(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), mpimage.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); // Test to verify that views of multiplanar images have layouts tracked correctly // by changing the image's layout then using a view of that image VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = mpimage.handle(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCreateImageView(m_device->device(), &ivci, nullptr, &view); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkResult err; err = vkCreateSampler(m_device->device(), 
&sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkDescriptorImageInfo image_info{}; image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; image_info.imageView = view; image_info.sampler = sampler; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &image_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; img_barrier.image = mpimage.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); vkDestroyImageView(m_device->device(), view, NULL); vkDestroySampler(m_device->device(), sampler, nullptr); } TEST_F(VkPositiveLayerTest, ApiVersionZero) { TEST_DESCRIPTION("Check that apiVersion = 0 is valid."); m_errorMonitor->ExpectSuccess(); app_info.apiVersion = 0U; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); 
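// VkApplicationInfo::apiVersion == 0 is explicitly permitted by the spec (it is treated as targeting Vulkan 1.0), so instance creation above must complete without any validation message.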
m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, DrawIndirectCountKHR) { TEST_DESCRIPTION("Test covered valid usage for vkCmdDrawIndirectCountKHR"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME); } else { printf(" VK_KHR_draw_indirect_count Extension not supported, skipping test\n"); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkMemoryRequirements memory_requirements; VkMemoryAllocateInfo memory_allocate_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}; auto vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndirectCountKHR"); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = vec4(1, 0, 0, 1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkDescriptorSetObj descriptor_set(m_device); descriptor_set.AppendDummy(); descriptor_set.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptor_set.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptor_set); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; buffer_create_info.size = sizeof(VkDrawIndirectCommand); buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer draw_buffer; vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &draw_buffer); VkBufferCreateInfo count_buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; count_buffer_create_info.size = sizeof(uint32_t); count_buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer count_buffer; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer); vkGetBufferMemoryRequirements(m_device->device(), count_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory count_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &count_buffer_memory); vkBindBufferMemory(m_device->device(), count_buffer, count_buffer_memory, 0); // VUID-vkCmdDrawIndirectCountKHR-buffer-03104 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-buffer-03104"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); vkGetBufferMemoryRequirements(m_device->device(), draw_buffer, &memory_requirements); memory_allocate_info.allocationSize = 
memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory draw_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &draw_buffer_memory); vkBindBufferMemory(m_device->device(), draw_buffer, draw_buffer_memory, 0); VkBuffer count_buffer_unbound; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer_unbound); // VUID-vkCmdDrawIndirectCountKHR-countBuffer-03106 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-countBuffer-03106"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer_unbound, 0, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndirectCountKHR-offset-03108 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-offset-03108"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 1, count_buffer, 0, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndirectCountKHR-countBufferOffset-03109 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-countBufferOffset-03109"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 1, 1, sizeof(VkDrawIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndirectCountKHR-stride-03110 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndirectCountKHR-stride-03110"); vkCmdDrawIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, 1); m_errorMonitor->VerifyFound(); // TODO: These covered VUIDs aren't tested. 
There is also no test coverage for the core Vulkan 1.0 vkCmdDraw* equivalent of // these: // VUID-vkCmdDrawIndirectCountKHR-renderPass-03113 // VUID-vkCmdDrawIndirectCountKHR-subpass-03114 // VUID-vkCmdDrawIndirectCountKHR-None-03120 m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), draw_buffer, 0); vkDestroyBuffer(m_device->device(), count_buffer, 0); vkDestroyBuffer(m_device->device(), count_buffer_unbound, 0); vkFreeMemory(m_device->device(), draw_buffer_memory, 0); vkFreeMemory(m_device->device(), count_buffer_memory, 0); } TEST_F(VkLayerTest, DrawIndexedIndirectCountKHR) { TEST_DESCRIPTION("Test covered valid usage for vkCmdDrawIndexedIndirectCountKHR"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME); } else { printf(" VK_KHR_draw_indirect_count Extension not supported, skipping test\n"); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkMemoryRequirements memory_requirements; VkMemoryAllocateInfo memory_allocate_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}; auto vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdDrawIndexedIndirectCountKHR"); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = vec4(1, 0, 0, 1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptorSet); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; buffer_create_info.size = sizeof(VkDrawIndexedIndirectCommand); buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer draw_buffer; vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &draw_buffer); vkGetBufferMemoryRequirements(m_device->device(), draw_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory draw_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &draw_buffer_memory); vkBindBufferMemory(m_device->device(), draw_buffer, draw_buffer_memory, 0); VkBufferCreateInfo count_buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; count_buffer_create_info.size = sizeof(uint32_t); count_buffer_create_info.usage = 
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer count_buffer; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer); vkGetBufferMemoryRequirements(m_device->device(), count_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory count_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &count_buffer_memory); vkBindBufferMemory(m_device->device(), count_buffer, count_buffer_memory, 0); VkBufferCreateInfo index_buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; index_buffer_create_info.size = sizeof(uint32_t); index_buffer_create_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; VkBuffer index_buffer; vkCreateBuffer(m_device->device(), &index_buffer_create_info, nullptr, &index_buffer); vkGetBufferMemoryRequirements(m_device->device(), index_buffer, &memory_requirements); memory_allocate_info.allocationSize = memory_requirements.size; m_device->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceMemory index_buffer_memory; vkAllocateMemory(m_device->device(), &memory_allocate_info, NULL, &index_buffer_memory); vkBindBufferMemory(m_device->device(), index_buffer, index_buffer_memory, 0); // VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152 (partial - only tests whether the index buffer is bound) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); vkCmdBindIndexBuffer(m_commandBuffer->handle(), index_buffer, 0, VK_INDEX_TYPE_UINT32); VkBuffer draw_buffer_unbound; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &draw_buffer_unbound); // VUID-vkCmdDrawIndexedIndirectCountKHR-buffer-03136 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-buffer-03136"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer_unbound, 0, count_buffer, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); VkBuffer count_buffer_unbound; vkCreateBuffer(m_device->device(), &count_buffer_create_info, nullptr, &count_buffer_unbound); // VUID-vkCmdDrawIndexedIndirectCountKHR-countBuffer-03138 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-countBuffer-03138"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer_unbound, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndexedIndirectCountKHR-offset-03140 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-offset-03140"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 1, count_buffer, 0, 1, sizeof(VkDrawIndexedIndirectCommand)); m_errorMonitor->VerifyFound(); // VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 1, 1, sizeof(VkDrawIndexedIndirectCommand)); 
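// The call above passes countBufferOffset = 1; VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141
// requires the offset to be a multiple of 4, so the failure message set above should fire.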
m_errorMonitor->VerifyFound();
// VUID-vkCmdDrawIndexedIndirectCountKHR-stride-03142
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawIndexedIndirectCountKHR-stride-03142"); vkCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), draw_buffer, 0, count_buffer, 0, 1, 1); m_errorMonitor->VerifyFound();
// TODO: These covered VUIDs aren't tested. There is also no test coverage for the core Vulkan 1.0 vkCmdDraw* equivalent of these:
// VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-03145
// VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-03146
// VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152 (partial)
m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), draw_buffer, 0); vkDestroyBuffer(m_device->device(), draw_buffer_unbound, 0); vkDestroyBuffer(m_device->device(), count_buffer, 0); vkDestroyBuffer(m_device->device(), count_buffer_unbound, 0); vkDestroyBuffer(m_device->device(), index_buffer, 0); vkFreeMemory(m_device->device(), draw_buffer_memory, 0); vkFreeMemory(m_device->device(), count_buffer_memory, 0); vkFreeMemory(m_device->device(), index_buffer_memory, 0); }
TEST_F(VkLayerTest, ExclusiveScissorNV) { TEST_DESCRIPTION("Test VK_NV_scissor_exclusive with multiViewport disabled."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array<const char *, 1> required_device_extensions = {{VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device that enables exclusive scissor but disables multiViewport
auto exclusive_scissor_features = lvl_init_struct<VkPhysicalDeviceExclusiveScissorFeaturesNV>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&exclusive_scissor_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); features2.features.multiViewport = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Based on PSOViewportStateTests
{ VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[100] = {scissor, scissor}; using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; uint32_t exclusive_scissor_count; VkRect2D *exclusive_scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {1, viewports, 1, scissors, 2, scissors, {"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02027", "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02029"}}, {1, viewports, 1, scissors, 100, scissors, {"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02027",
"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02028", "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02029"}}, {1, viewports, 1, scissors, 1, nullptr, {"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-pDynamicStates-02030"}}, }; for (const auto &test_case : test_cases) { VkPipelineViewportExclusiveScissorStateCreateInfoNV exc = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV}; const auto break_vp = [&test_case, &exc](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; helper.vp_state_ci_.pNext = &exc; exc.exclusiveScissorCount = test_case.exclusive_scissor_count; exc.pExclusiveScissors = test_case.exclusive_scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } // Based on SetDynScissorParamTests { auto vkCmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdSetExclusiveScissorNV"); const VkRect2D scissor = {{0, 0}, {16, 16}}; const VkRect2D scissors[] = {scissor, scissor}; m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 1, 1, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdSetExclusiveScissorNV: parameter exclusiveScissorCount must be greater than 0"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-exclusiveScissorCount-02036"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdSetExclusiveScissorNV: parameter exclusiveScissorCount must be greater than 0"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 1, 0, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetExclusiveScissorNV-exclusiveScissorCount-02036"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 1, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdSetExclusiveScissorNV: required parameter pExclusiveScissors specified as NULL"); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 1, nullptr); m_errorMonitor->VerifyFound(); struct TestCase { VkRect2D scissor; std::string vuid; }; std::vector test_cases = { {{{-1, 0}, {16, 16}}, "VUID-vkCmdSetExclusiveScissorNV-x-02037"}, {{{0, -1}, {16, 16}}, "VUID-vkCmdSetExclusiveScissorNV-x-02037"}, {{{1, 0}, {INT32_MAX, 16}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02038"}, {{{INT32_MAX, 0}, {1, 16}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02038"}, {{{0, 0}, {uint32_t{INT32_MAX} + 1, 16}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02038"}, {{{0, 1}, {16, 
INT32_MAX}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02039"}, {{{0, INT32_MAX}, {16, 1}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02039"}, {{{0, 0}, {16, uint32_t{INT32_MAX} + 1}}, "VUID-vkCmdSetExclusiveScissorNV-offset-02039"}}; for (const auto &test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid); vkCmdSetExclusiveScissorNV(m_commandBuffer->handle(), 0, 1, &test_case.scissor); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } } TEST_F(VkLayerTest, ShadingRateImageNV) { TEST_DESCRIPTION("Test VK_NV_shading_rate_image."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array required_device_extensions = {{VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables shading_rate_image but disables multiViewport auto shading_rate_image_features = lvl_init_struct(); auto features2 = lvl_init_struct(&shading_rate_image_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); features2.features.multiViewport = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Test shading rate image creation VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8_UINT; image_create_info.extent.width = 4; image_create_info.extent.height = 4; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT; // image type must be 2D image_create_info.imageType = VK_IMAGE_TYPE_3D; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-02082"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.imageType = VK_IMAGE_TYPE_2D; // must be single sample image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; 
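// With VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set, only VK_SAMPLE_COUNT_1_BIT is allowed
// (VUID-VkImageCreateInfo-samples-02083), so the 2-sample request below must be rejected.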
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-02083"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; // tiling must be optimal image_create_info.tiling = VK_IMAGE_TILING_LINEAR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-tiling-02084"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Should succeed. result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyNotFound(); // bind memory to the image VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); result = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(result); result = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(result); // Test image view creation VkImageView view; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image; ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8_UINT; ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // view type must be 2D or 2D_ARRAY ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02086"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01003"); result = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImageView(m_device->device(), view, NULL); view = VK_NULL_HANDLE; } ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; // format must be R8_UINT ivci.format = VK_FORMAT_R8_UNORM; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02087"); result = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImageView(m_device->device(), view, NULL); view = VK_NULL_HANDLE; } ivci.format = VK_FORMAT_R8_UINT; vkCreateImageView(m_device->device(), &ivci, nullptr, &view); m_errorMonitor->VerifyNotFound(); // Test pipeline creation VkPipelineViewportShadingRateImageStateCreateInfoNV vsrisci = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV}; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[20] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[20] = {scissor, scissor}; VkDynamicState dynPalette = 
VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV; VkPipelineDynamicStateCreateInfo dyn = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, nullptr, 0, 1, &dynPalette}; // viewportCount must be 0 or 1 when multiViewport is disabled { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 2; helper.vp_state_ci_.pViewports = viewports; helper.vp_state_ci_.scissorCount = 2; helper.vp_state_ci_.pScissors = scissors; helper.vp_state_ci_.pNext = &vsrisci; helper.dyn_state_ci_ = dyn; vsrisci.shadingRateImageEnable = VK_TRUE; vsrisci.viewportCount = 2; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02054", "VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"})); } // viewportCounts must match { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 1; helper.vp_state_ci_.pViewports = viewports; helper.vp_state_ci_.scissorCount = 1; helper.vp_state_ci_.pScissors = scissors; helper.vp_state_ci_.pNext = &vsrisci; helper.dyn_state_ci_ = dyn; vsrisci.shadingRateImageEnable = VK_TRUE; vsrisci.viewportCount = 0; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-shadingRateImageEnable-02056"})); } // pShadingRatePalettes must not be NULL. { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 1; helper.vp_state_ci_.pViewports = viewports; helper.vp_state_ci_.scissorCount = 1; helper.vp_state_ci_.pScissors = scissors; helper.vp_state_ci_.pNext = &vsrisci; vsrisci.shadingRateImageEnable = VK_TRUE; vsrisci.viewportCount = 1; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-pDynamicStates-02057"})); } // Create an image without the SRI bit VkImageObj nonSRIimage(m_device); nonSRIimage.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(nonSRIimage.initialized()); VkImageView nonSRIview = nonSRIimage.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Test SRI layout on non-SRI image VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = nullptr; img_barrier.srcAccessMask = 0; img_barrier.dstAccessMask = 0; img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV; img_barrier.image = nonSRIimage.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; m_commandBuffer->begin(); // Error trying to convert it to SRI layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-02088"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // succeed converting it to GENERAL img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; 
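// Any image can be transitioned to VK_IMAGE_LAYOUT_GENERAL, so recording this barrier should
// produce no validation error (checked by VerifyNotFound after the call).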
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyNotFound(); // Test vkCmdBindShadingRateImageNV errors auto vkCmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdBindShadingRateImageNV"); // if the view is non-NULL, it must be R8_UINT, USAGE_SRI, image layout must match, layout must be valid m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageView-02060"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageView-02061"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageView-02062"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063"); vkCmdBindShadingRateImageNV(m_commandBuffer->handle(), nonSRIview, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); m_errorMonitor->VerifyFound(); // Test vkCmdSetViewportShadingRatePaletteNV errors auto vkCmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdSetViewportShadingRatePaletteNV"); VkShadingRatePaletteEntryNV paletteEntries[100] = {}; VkShadingRatePaletteNV palette = {100, paletteEntries}; VkShadingRatePaletteNV palettes[] = {palette, palette}; // errors on firstViewport/viewportCount m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02066"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02067"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02068"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewportShadingRatePaletteNV-viewportCount-02069"); vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 20, 2, palettes); m_errorMonitor->VerifyFound(); // shadingRatePaletteEntryCount must be in range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071"); vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 0, 1, palettes); m_errorMonitor->VerifyFound(); VkCoarseSampleLocationNV locations[100] = { {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 1}, // duplicate {1000, 0, 0}, // pixelX too large {0, 1000, 0}, // pixelY too large {0, 0, 1000}, // sample too large }; // Test custom sample orders, both via pipeline state and via dynamic state { VkCoarseSampleOrderCustomNV sampOrdBadShadingRate = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV, 1, 1, locations}; VkCoarseSampleOrderCustomNV sampOrdBadSampleCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 3, 1, locations}; VkCoarseSampleOrderCustomNV sampOrdBadSampleLocationCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 2, locations}; VkCoarseSampleOrderCustomNV sampOrdDuplicateLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2, &locations[1]}; VkCoarseSampleOrderCustomNV sampOrdOutOfRangeLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2, &locations[4]}; VkCoarseSampleOrderCustomNV sampOrdTooLargeSampleLocationCount = { 
VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV, 4, 64, &locations[8]}; VkCoarseSampleOrderCustomNV sampOrdGood = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2, &locations[0]}; VkPipelineViewportCoarseSampleOrderStateCreateInfoNV csosci = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV}; csosci.sampleOrderType = VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV; csosci.customSampleOrderCount = 1; using std::vector; struct TestCase { const VkCoarseSampleOrderCustomNV *order; vector vuids; }; vector test_cases = { {&sampOrdBadShadingRate, {"VUID-VkCoarseSampleOrderCustomNV-shadingRate-02073"}}, {&sampOrdBadSampleCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleCount-02074", "VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}}, {&sampOrdBadSampleLocationCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}}, {&sampOrdDuplicateLocations, {"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}}, {&sampOrdOutOfRangeLocations, {"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077", "VUID-VkCoarseSampleLocationNV-pixelX-02078", "VUID-VkCoarseSampleLocationNV-pixelY-02079", "VUID-VkCoarseSampleLocationNV-sample-02080"}}, {&sampOrdTooLargeSampleLocationCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02076", "VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}}, {&sampOrdGood, {}}, }; for (const auto &test_case : test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.pNext = &csosci; csosci.pCustomSampleOrders = test_case.order; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } // Test vkCmdSetCoarseSampleOrderNV errors auto vkCmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)vkGetDeviceProcAddr(m_device->device(), "vkCmdSetCoarseSampleOrderNV"); for (const auto &test_case : test_cases) { for (uint32_t i = 0; i < test_case.vuids.size(); ++i) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids[i]); } vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, 1, test_case.order); if (test_case.vuids.size()) { m_errorMonitor->VerifyFound(); } else { m_errorMonitor->VerifyNotFound(); } } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetCoarseSampleOrderNV-sampleOrderType-02081"); vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV, 1, &sampOrdGood); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), image_memory, NULL); } TEST_F(VkLayerTest, CornerSampledImageNV) { TEST_DESCRIPTION("Test VK_NV_corner_sampled_image."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array required_device_extensions = {{VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s 
Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables exclusive scissor but disables multiViewport auto corner_sampled_image_features = lvl_init_struct(); auto features2 = lvl_init_struct(&corner_sampled_image_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_1D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 2; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV; // image type must be 2D or 3D m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02050"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // cube/depth not supported image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.extent.height = 2; image_create_info.format = VK_FORMAT_D24_UNORM_S8_UINT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02051"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; // 2D width/height must be > 1 image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.extent.height = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02052"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // 3D width/height/depth must be > 1 image_create_info.imageType = VK_IMAGE_TYPE_3D; image_create_info.extent.height = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-02053"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.imageType = VK_IMAGE_TYPE_2D; // Valid # of mip levels image_create_info.extent = {7, 7, 1}; image_create_info.mipLevels = 3; // 3 = ceil(log2(7)) result = vkCreateImage(m_device->device(), &image_create_info, NULL, 
&image); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.extent = {8, 8, 1}; image_create_info.mipLevels = 3; // 3 = ceil(log2(8)) result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.extent = {9, 9, 1}; image_create_info.mipLevels = 3; // 4 = ceil(log2(9)) result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyNotFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // Invalid # of mip levels image_create_info.extent = {8, 8, 1}; image_create_info.mipLevels = 4; // 3 = ceil(log2(8)) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, MeshShaderNV) { TEST_DESCRIPTION("Test VK_NV_mesh_shader."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array required_device_extensions = {{VK_NV_MESH_SHADER_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); // Create a device that enables mesh_shader auto mesh_shader_features = lvl_init_struct(); auto features2 = lvl_init_struct(&mesh_shader_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); features2.features.multiDrawIndirect = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const char vertShaderText[] = "#version 450\n" "vec2 vertices[3];\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" " gl_PointSize = 1.0f;\n" "}\n"; static const char meshShaderText[] = "#version 450\n" "#extension GL_NV_mesh_shader : require\n" "layout(local_size_x = 1) in;\n" "layout(max_vertices = 3) out;\n" "layout(max_primitives = 1) out;\n" "layout(triangles) out;\n" "void main() {\n" " gl_MeshVerticesNV[0].gl_Position = vec4(-1.0, -1.0, 0, 1);\n" " gl_MeshVerticesNV[1].gl_Position = vec4( 1.0, -1.0, 0, 1);\n" " gl_MeshVerticesNV[2].gl_Position = vec4( 0.0, 1.0, 0, 1);\n" " gl_PrimitiveIndicesNV[0] = 0;\n" " gl_PrimitiveIndicesNV[1] = 1;\n" " gl_PrimitiveIndicesNV[2] = 2;\n" " gl_PrimitiveCountNV = 1;\n" "}\n"; VkShaderObj vs(m_device, 
vertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ms(m_device, meshShaderText, VK_SHADER_STAGE_MESH_BIT_NV, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Test pipeline creation { // can't mix mesh with vertex const auto break_vp = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo(), ms.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector({"VUID-VkGraphicsPipelineCreateInfo-pStages-02095"})); // vertex or mesh must be present const auto break_vp2 = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, break_vp2, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector({"VUID-VkGraphicsPipelineCreateInfo-stage-02096"})); // vertexinput and inputassembly must be valid when vertex stage is present const auto break_vp3 = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; helper.gp_ci_.pVertexInputState = nullptr; helper.gp_ci_.pInputAssemblyState = nullptr; }; CreatePipelineHelper::OneshotTest(*this, break_vp3, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector({"VUID-VkGraphicsPipelineCreateInfo-pStages-02097", "VUID-VkGraphicsPipelineCreateInfo-pStages-02098"})); } PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)vkGetInstanceProcAddr(instance(), "vkCmdDrawMeshTasksIndirectNV"); VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; buffer_create_info.size = sizeof(uint32_t); buffer_create_info.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; VkBuffer buffer; VkResult result = vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer); ASSERT_VK_SUCCESS(result); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02147"); vkCmdDrawMeshTasksIndirectNV(m_commandBuffer->handle(), buffer, 0, 2, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), buffer, 0); } TEST_F(VkLayerTest, MeshShaderDisabledNV) { TEST_DESCRIPTION("Test VK_NV_mesh_shader VUs with NV_mesh_shader disabled."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-02107"); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-02108"); vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResetEvent-stageMask-02109"); vkCmdResetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResetEvent-stageMask-02110"); vkCmdResetEvent(m_commandBuffer->handle(), 
event, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-02111"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-dstStageMask-02113"); vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-02112"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-dstStageMask-02114"); vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-02115"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-02117"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, 0, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-02116"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-02118"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, 0, 0, nullptr, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); VkSemaphoreCreateInfo semaphore_create_info = {}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; VkSemaphore semaphore; ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore)); VkPipelineStageFlags stage_flags = VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV; VkSubmitInfo submit_info = {}; // Signal the semaphore so the next test can wait on it. 
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = &stage_flags; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubmitInfo-pWaitDstStageMask-02089"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubmitInfo-pWaitDstStageMask-02090"); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineShaderStageCreateInfo meshStage = {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO}; meshStage = vs.GetStageCreateInfo(); meshStage.stage = VK_SHADER_STAGE_MESH_BIT_NV; VkPipelineShaderStageCreateInfo taskStage = {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO}; taskStage = vs.GetStageCreateInfo(); taskStage.stage = VK_SHADER_STAGE_TASK_BIT_NV; // mesh and task shaders not supported const auto break_vp = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {meshStage, taskStage, vs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest( *this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, vector({"VUID-VkPipelineShaderStageCreateInfo-pName-00707", "VUID-VkPipelineShaderStageCreateInfo-pName-00707", "VUID-VkPipelineShaderStageCreateInfo-stage-02091", "VUID-VkPipelineShaderStageCreateInfo-stage-02092"})); vkDestroyEvent(m_device->device(), event, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); } TEST_F(VkLayerTest, InlineUniformBlockEXT) { TEST_DESCRIPTION("Test VK_EXT_inline_uniform_block."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array required_device_extensions = {VK_KHR_MAINTENANCE1_EXTENSION_NAME, VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } // Enable descriptor indexing if supported, but don't require it. 
bool supportsDescriptorIndexing = true; required_device_extensions = {VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); supportsDescriptorIndexing = false; return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); auto descriptor_indexing_features = lvl_init_struct(); void *pNext = supportsDescriptorIndexing ? &descriptor_indexing_features : nullptr; // Create a device that enables inline_uniform_block auto inline_uniform_block_features = lvl_init_struct(pNext); auto features2 = lvl_init_struct(&inline_uniform_block_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); assert(vkGetPhysicalDeviceProperties2KHR != nullptr); // Get the inline uniform block limits auto inline_uniform_props = lvl_init_struct(); auto prop2 = lvl_init_struct(&inline_uniform_props); vkGetPhysicalDeviceProperties2KHR(gpu(), &prop2); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); VkDescriptorSetLayoutBinding dslb = {}; std::vector dslb_vec = {}; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; VkDescriptorSetLayout ds_layout = {}; // Test too many bindings dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; dslb.descriptorCount = 4; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; uint32_t maxBlocks = std::max(inline_uniform_props.maxPerStageDescriptorInlineUniformBlocks, inline_uniform_props.maxDescriptorSetInlineUniformBlocks); for (uint32_t i = 0; i < 1 + maxBlocks; ++i) { dslb.binding = i; dslb_vec.push_back(dslb); } ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217"); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); pipeline_layout = VK_NULL_HANDLE; vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); ds_layout 
= VK_NULL_HANDLE; // Single binding that's too large and is not a multiple of 4 dslb.binding = 0; dslb.descriptorCount = inline_uniform_props.maxInlineUniformBlockSize + 1; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &dslb; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210"); err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); ds_layout = VK_NULL_HANDLE; // Pool size must be a multiple of 4 VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; ds_type_count.descriptorCount = 33; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = 0; ds_pool_ci.maxSets = 2; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-type-02218"); err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); m_errorMonitor->VerifyFound(); if (ds_pool) { vkDestroyDescriptorPool(m_device->handle(), ds_pool, nullptr); ds_pool = VK_NULL_HANDLE; } // Create a valid pool ds_type_count.descriptorCount = 32; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); m_errorMonitor->VerifyNotFound(); // Create two valid sets with 8 bytes each dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; dslb.descriptorCount = 8; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 1; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = &dslb_vec[0]; err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); m_errorMonitor->VerifyNotFound(); VkDescriptorSet descriptor_sets[2]; VkDescriptorSetLayout set_layouts[2] = {ds_layout, ds_layout}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 2; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = set_layouts; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets); m_errorMonitor->VerifyNotFound(); // Test invalid VkWriteDescriptorSet parameters (array element and size must be multiple of 4) VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_sets[0]; descriptor_write.dstBinding = 0; descriptor_write.dstArrayElement = 0; descriptor_write.descriptorCount = 3; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT; uint32_t dummyData[8] = {}; VkWriteDescriptorSetInlineUniformBlockEXT write_inline_uniform = {}; write_inline_uniform.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT; write_inline_uniform.dataSize = 3; write_inline_uniform.pData = &dummyData[0]; descriptor_write.pNext = &write_inline_uniform; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02220"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, 
NULL); m_errorMonitor->VerifyFound();
descriptor_write.dstArrayElement = 1; descriptor_write.descriptorCount = 4; write_inline_uniform.dataSize = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02219"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound();
descriptor_write.pNext = nullptr; descriptor_write.dstArrayElement = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-02221"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound();
descriptor_write.pNext = &write_inline_uniform; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound();
// Test invalid VkCopyDescriptorSet parameters (array element and size must be multiple of 4)
VkCopyDescriptorSet copy_ds_update = {}; copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = descriptor_sets[0]; copy_ds_update.srcBinding = 0; copy_ds_update.srcArrayElement = 0; copy_ds_update.dstSet = descriptor_sets[1]; copy_ds_update.dstBinding = 0; copy_ds_update.dstArrayElement = 0; copy_ds_update.descriptorCount = 4; copy_ds_update.srcArrayElement = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-srcBinding-02223"); vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound();
copy_ds_update.srcArrayElement = 0; copy_ds_update.dstArrayElement = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-dstBinding-02224"); vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound();
copy_ds_update.dstArrayElement = 0; copy_ds_update.descriptorCount = 5; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCopyDescriptorSet-srcBinding-02225"); vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound();
copy_ds_update.descriptorCount = 4; vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyNotFound();
vkDestroyDescriptorPool(m_device->handle(), ds_pool, nullptr); vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, nullptr); }
TEST_F(VkLayerTest, FramebufferMixedSamplesNV) { TEST_DESCRIPTION("Verify VK_NV_framebuffer_mixed_samples."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkSampleCountFlagBits color_samples; VkSampleCountFlagBits depth_samples; VkSampleCountFlagBits raster_samples; VkBool32 depth_test; VkBool32 sample_shading; uint32_t table_count; bool positiveTest; std::string vuid; }; std::vector<TestCase> test_cases = { {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 4, false,
"VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 2, true, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, VK_TRUE, VK_FALSE, 1, false, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_SAMPLE_COUNT_8_BIT, VK_TRUE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_1_BIT, VK_FALSE, VK_FALSE, 1, false, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_TRUE, 1, false, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757"}}; for (const auto &test_case : test_cases) { VkAttachmentDescription att[2] = {{}, {}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = test_case.color_samples; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_D24_UNORM_S8_UINT; att[1].samples = test_case.depth_samples; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentReference cr = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference dr = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = 1; sp.pColorAttachments = &cr; sp.pResolveAttachments = NULL; sp.pDepthStencilAttachment = &dr; VkRenderPassCreateInfo rpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO}; rpi.attachmentCount = 2; rpi.pAttachments = att; rpi.subpassCount = 1; rpi.pSubpasses = &sp; VkRenderPass rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"); VkResult err = vkCreateRenderPass(m_device->device(), &rpi, nullptr, &rp); m_errorMonitor->VerifyNotFound(); ASSERT_VK_SUCCESS(err); VkPipelineDepthStencilStateCreateInfo ds = {VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO}; VkPipelineCoverageModulationStateCreateInfoNV cmi = {VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV}; // Create a dummy modulation table that can be used for the positive // coverageModulationTableCount test. 
std::vector<float> cm_table{}; const auto break_samples = [&cmi, &rp, &ds, &cm_table, &test_case](CreatePipelineHelper &helper) { cm_table.resize(test_case.raster_samples / test_case.color_samples); cmi.flags = 0; cmi.coverageModulationTableEnable = (test_case.table_count > 1); cmi.coverageModulationTableCount = test_case.table_count; cmi.pCoverageModulationTable = cm_table.data(); ds.depthTestEnable = test_case.depth_test; helper.pipe_ms_state_ci_.pNext = &cmi; helper.pipe_ms_state_ci_.rasterizationSamples = test_case.raster_samples; helper.pipe_ms_state_ci_.sampleShadingEnable = test_case.sample_shading; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pDepthStencilState = &ds; }; CreatePipelineHelper::OneshotTest(*this, break_samples, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid, test_case.positiveTest); vkDestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, FramebufferMixedSamples) { TEST_DESCRIPTION("Verify that the expected VUIDs are hit when VK_NV_framebuffer_mixed_samples is disabled."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkSampleCountFlagBits color_samples; VkSampleCountFlagBits depth_samples; VkSampleCountFlagBits raster_samples; bool positiveTest; }; std::vector<TestCase> test_cases = { {VK_SAMPLE_COUNT_2_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, false}, // Fails vkCreateRenderPass and vkCreateGraphicsPipeline {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, false}, // Fails vkCreateGraphicsPipeline {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, true} // Pass }; for (const auto &test_case : test_cases) { VkAttachmentDescription att[2] = {{}, {}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = test_case.color_samples; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_D24_UNORM_S8_UINT; att[1].samples = test_case.depth_samples; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentReference cr = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference dr = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = 1; sp.pColorAttachments = &cr; sp.pResolveAttachments = NULL; sp.pDepthStencilAttachment = &dr; VkRenderPassCreateInfo rpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO}; rpi.attachmentCount = 2; rpi.pAttachments = att; rpi.subpassCount = 1; rpi.pSubpasses = &sp; VkRenderPass rp; if (test_case.color_samples == test_case.depth_samples) { m_errorMonitor->ExpectSuccess(); } else { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"); } VkResult err = vkCreateRenderPass(m_device->device(), &rpi, nullptr, &rp); if (test_case.color_samples == test_case.depth_samples) { m_errorMonitor->VerifyNotFound(); } else { m_errorMonitor->VerifyFound(); continue; } ASSERT_VK_SUCCESS(err); VkPipelineDepthStencilStateCreateInfo ds = {VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO}; const auto break_samples = [&rp, &ds, &test_case](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.rasterizationSamples = test_case.raster_samples; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pDepthStencilState = &ds; };
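// With VK_NV_framebuffer_mixed_samples not enabled, rasterizationSamples is expected to match the
// attachment sample counts, so only the 4/4/4 case above should build a pipeline cleanly; the
// remaining mismatched case is expected to hit the subpass-00757 check exercised below.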
CreatePipelineHelper::OneshotTest(*this, break_samples, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757", test_case.positiveTest); vkDestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, FragmentCoverageToColorNV) { TEST_DESCRIPTION("Verify VK_NV_fragment_coverage_to_color."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkFormat format; VkBool32 enabled; uint32_t location; bool positive; }; const std::array test_cases = {{ {VK_FORMAT_R8G8B8A8_UNORM, VK_FALSE, 0, true}, {VK_FORMAT_R8_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R16_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R16_SINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_SINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_SINT, VK_TRUE, 2, false}, {VK_FORMAT_R8_SINT, VK_TRUE, 3, false}, {VK_FORMAT_R8G8B8A8_UNORM, VK_TRUE, 1, false}, }}; for (const auto &test_case : test_cases) { std::array att = {{{}, {}}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = VK_SAMPLE_COUNT_1_BIT; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_R8G8B8A8_UNORM; att[1].samples = VK_SAMPLE_COUNT_1_BIT; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; if (test_case.location < att.size()) { att[test_case.location].format = test_case.format; } const std::array cr = {{{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}}}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = cr.size(); sp.pColorAttachments = cr.data(); VkRenderPassCreateInfo rpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO}; rpi.attachmentCount = att.size(); rpi.pAttachments = att.data(); rpi.subpassCount = 1; rpi.pSubpasses = &sp; const std::array cba = {{{}, {}, {}}}; VkPipelineColorBlendStateCreateInfo cbi = {VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO}; cbi.attachmentCount = cba.size(); cbi.pAttachments = cba.data(); VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpi, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkPipelineCoverageToColorStateCreateInfoNV cci = {VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV}; const auto break_samples = [&cci, &cbi, &rp, &test_case](CreatePipelineHelper &helper) { cci.coverageToColorEnable = test_case.enabled; cci.coverageToColorLocation = test_case.location; helper.pipe_ms_state_ci_.pNext = &cci; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pColorBlendState = &cbi; }; CreatePipelineHelper::OneshotTest(*this, break_samples, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404", test_case.positive); vkDestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkPositiveLayerTest, RayTracingPipelineNV) { TEST_DESCRIPTION("Test VK_NV_ray_tracing."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { 
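// The GPDP2 instance extension is requested here because the test later resolves
// vkGetPhysicalDeviceFeatures2KHR through vkGetInstanceProcAddr.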
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); std::array required_device_extensions = { {VK_NV_RAY_TRACING_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); static const char rayGenShaderText[] = "#version 460 core \n" "#extension GL_NV_ray_tracing : require \n" "layout(set = 0, binding = 0, rgba8) uniform image2D image; \n" "layout(set = 0, binding = 1) uniform accelerationStructureNV as; \n" " \n" "layout(location = 0) rayPayloadNV float payload; \n" " \n" "void main() \n" "{ \n" " vec4 col = vec4(0, 0, 0, 1); \n" " \n" " vec3 origin = vec3(float(gl_LaunchIDNV.x)/float(gl_LaunchSizeNV.x), float(gl_LaunchIDNV.y)/float(gl_LaunchSizeNV.y), " "1.0); \n" " vec3 dir = vec3(0.0, 0.0, -1.0); \n" " \n" " payload = 0.5; \n" " traceNV(as, gl_RayFlagsCullBackFacingTrianglesNV, 0xff, 0, 1, 0, origin, 0.0, dir, 1000.0, 0); \n" " \n" " col.y = payload; \n" " \n" " imageStore(image, ivec2(gl_LaunchIDNV.xy), col); \n" "}\n"; static char const closestHitShaderText[] = "#version 460 core \n" "#extension GL_NV_ray_tracing : require \n" "layout(location = 0) rayPayloadInNV float hitValue; \n" " \n" "void main() { \n" " hitValue = 1.0; \n" "} \n"; static char const missShaderText[] = "#version 460 core \n" "#extension GL_NV_ray_tracing : require \n" "layout(location = 0) rayPayloadInNV float hitValue; \n" " \n" "void main() { \n" " hitValue = 0.0; \n" "} \n"; VkShaderObj rgs(m_device, rayGenShaderText, VK_SHADER_STAGE_RAYGEN_BIT_NV, this); VkShaderObj chs(m_device, closestHitShaderText, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV, this); VkShaderObj mis(m_device, missShaderText, VK_SHADER_STAGE_MISS_BIT_NV, this); VkPipelineShaderStageCreateInfo rayStages[3]; memset(&rayStages[0], 0, sizeof(rayStages)); rayStages[0] = rgs.GetStageCreateInfo(); rayStages[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; rayStages[1] = chs.GetStageCreateInfo(); rayStages[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; rayStages[2] = mis.GetStageCreateInfo(); rayStages[2].stage = VK_SHADER_STAGE_MISS_BIT_NV; VkRayTracingShaderGroupCreateInfoNV groups[3]; memset(&groups[0], 0, sizeof(groups)); groups[0].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; groups[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; groups[0].generalShader = 0; groups[0].closestHitShader = VK_SHADER_UNUSED_NV; groups[0].anyHitShader = VK_SHADER_UNUSED_NV; groups[0].intersectionShader = VK_SHADER_UNUSED_NV; groups[1].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; groups[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; groups[1].generalShader = VK_SHADER_UNUSED_NV; groups[1].closestHitShader = 1; groups[1].anyHitShader = VK_SHADER_UNUSED_NV; 
groups[1].intersectionShader = VK_SHADER_UNUSED_NV; groups[2].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV; groups[2].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; groups[2].generalShader = 2; groups[2].closestHitShader = VK_SHADER_UNUSED_NV; groups[2].anyHitShader = VK_SHADER_UNUSED_NV; groups[2].intersectionShader = VK_SHADER_UNUSED_NV; const uint32_t bindingCount = 2; VkDescriptorSetLayoutBinding binding[bindingCount] = {}; binding[0].binding = 0; binding[0].descriptorCount = 1; binding[0].stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_NV; binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; binding[1].binding = 1; binding[1].descriptorCount = 1; binding[1].stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_NV; binding[1].descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV; VkDescriptorSetLayoutCreateInfo descriptorSetEntry = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO}; descriptorSetEntry.bindingCount = bindingCount; descriptorSetEntry.pBindings = binding; VkDescriptorSetLayout descriptorSetLayout; VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &descriptorSetEntry, 0, &descriptorSetLayout); ASSERT_VK_SUCCESS(err); VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO}; pipelineLayoutCreateInfo.setLayoutCount = 1; pipelineLayoutCreateInfo.pSetLayouts = &descriptorSetLayout; VkPipelineLayout pipelineLayout; err = vkCreatePipelineLayout(m_device->device(), &pipelineLayoutCreateInfo, 0, &pipelineLayout); ASSERT_VK_SUCCESS(err); PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)vkGetInstanceProcAddr(instance(), "vkCreateRayTracingPipelinesNV"); VkRayTracingPipelineCreateInfoNV rayPipelineInfo = {VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV}; rayPipelineInfo.layout = pipelineLayout; rayPipelineInfo.stageCount = 3; rayPipelineInfo.pStages = &rayStages[0]; rayPipelineInfo.groupCount = 3; rayPipelineInfo.pGroups = &groups[0]; VkPipeline rayPipeline; err = vkCreateRayTracingPipelinesNV(m_device->device(), VK_NULL_HANDLE, 1, &rayPipelineInfo, 0, &rayPipeline); ASSERT_VK_SUCCESS(err); vkDestroyPipeline(m_device->device(), rayPipeline, 0); vkDestroyPipelineLayout(m_device->device(), pipelineLayout, 0); vkDestroyDescriptorSetLayout(m_device->device(), descriptorSetLayout, 0); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreateYCbCrSampler) { TEST_DESCRIPTION("Verify YCbCr sampler creation."); // Test requires API 1.1 or (API 1.0 + SamplerYCbCr extension). 
Request API 1.1 SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // In case we don't have API 1.1+, try enabling the extension directly (and it's dependencies) if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); // Verify we have the requested support bool ycbcr_support = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) || (DeviceValidationVersion() >= VK_API_VERSION_1_1)); if (!ycbcr_support) { printf("%s Did not find required device extension %s; test skipped.\n", kSkipPrefix, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); return; } VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE; VkSamplerYcbcrConversionCreateInfo sycci = {}; sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO; sycci.format = VK_FORMAT_UNDEFINED; sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01649"); vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, ViewportArray2NV) { TEST_DESCRIPTION("Test to validate VK_NV_viewport_array2"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); VkPhysicalDeviceFeatures available_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&available_features)); if (!available_features.multiViewport) { printf("VkPhysicalDeviceFeatures::multiViewport is not supported, skipping tests\n"); return; } if (!available_features.tessellationShader) { printf("VkPhysicalDeviceFeatures::tessellationShader is not supported, skipping tests\n"); return; } if (!available_features.geometryShader) { printf("VkPhysicalDeviceFeatures::geometryShader is not supported, skipping tests\n"); return; } if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char tcs_src[] = R"( #version 450 layout(vertices = 3) out; void main() { gl_TessLevelOuter[0] = 4.0f; gl_TessLevelOuter[1] = 4.0f; gl_TessLevelOuter[2] = 4.0f; gl_TessLevelInner[0] = 3.0f; gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position; })"; const char fs_src[] = R"( #version 450 layout(location = 0) out vec4 outColor; void main() { outColor = vec4(1.0f); })"; // Create tessellation control and fragment shader here since they will not be // modified by the different test cases. 
VkShaderObj tcs(m_device, tcs_src, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj fs(m_device, fs_src, VK_SHADER_STAGE_FRAGMENT_BIT, this); std::vector vps = {{0.0f, 0.0f, m_width / 2.0f, m_height}, {m_width / 2.0f, 0.0f, m_width / 2.0f, m_height}}; std::vector scs = { {{0, 0}, {static_cast(m_width) / 2, static_cast(m_height)}}, {{static_cast(m_width) / 2, 0}, {static_cast(m_width) / 2, static_cast(m_height)}}}; enum class TestStage { VERTEX = 0, TESSELLATION_EVAL = 1, GEOMETRY = 2 }; std::array vertex_stages = {{TestStage::VERTEX, TestStage::TESSELLATION_EVAL, TestStage::GEOMETRY}}; // Verify that the usage of gl_ViewportMask[] in the allowed vertex processing // stages does not cause any errors. for (auto stage : vertex_stages) { m_errorMonitor->ExpectSuccess(); VkPipelineInputAssemblyStateCreateInfo iaci = {VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO}; iaci.topology = (stage != TestStage::VERTEX) ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; VkPipelineTessellationStateCreateInfo tsci = {VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO}; tsci.patchControlPoints = 3; const VkPipelineLayoutObj pl(m_device); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.SetInputAssembly(&iaci); pipe.SetViewport(vps); pipe.SetScissor(scs); pipe.AddShader(&fs); std::stringstream vs_src, tes_src, geom_src; vs_src << R"( #version 450 #extension GL_NV_viewport_array2 : require vec2 positions[3] = { vec2( 0.0f, -0.5f), vec2( 0.5f, 0.5f), vec2(-0.5f, 0.5f) }; void main() {)"; // Write viewportMask if the vertex shader is the last vertex processing stage. if (stage == TestStage::VERTEX) { vs_src << "gl_ViewportMask[0] = 3;\n"; } vs_src << R"( gl_Position = vec4(positions[gl_VertexIndex % 3], 0.0, 1.0); })"; VkShaderObj vs(m_device, vs_src.str().c_str(), VK_SHADER_STAGE_VERTEX_BIT, this); pipe.AddShader(&vs); std::unique_ptr tes, geom; if (stage >= TestStage::TESSELLATION_EVAL) { tes_src << R"( #version 450 #extension GL_NV_viewport_array2 : require layout(triangles) in; void main() { gl_Position = (gl_in[0].gl_Position * gl_TessCoord.x + gl_in[1].gl_Position * gl_TessCoord.y + gl_in[2].gl_Position * gl_TessCoord.z);)"; // Write viewportMask if the tess eval shader is the last vertex processing stage. 
if (stage == TestStage::TESSELLATION_EVAL) { tes_src << "gl_ViewportMask[0] = 3;\n"; } tes_src << "}"; tes = std::unique_ptr( new VkShaderObj(m_device, tes_src.str().c_str(), VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this)); pipe.AddShader(tes.get()); pipe.AddShader(&tcs); pipe.SetTessellation(&tsci); } if (stage >= TestStage::GEOMETRY) { geom_src << R"( #version 450 #extension GL_NV_viewport_array2 : require layout(triangles) in; layout(triangle_strip, max_vertices = 3) out; void main() { gl_ViewportMask[0] = 3; for(int i = 0; i < 3; ++i) { gl_Position = gl_in[i].gl_Position; EmitVertex(); } })"; geom = std::unique_ptr(new VkShaderObj(m_device, geom_src.str().c_str(), VK_SHADER_STAGE_GEOMETRY_BIT, this)); pipe.AddShader(geom.get()); } pipe.CreateVKPipeline(pl.handle(), renderPass()); m_errorMonitor->VerifyNotFound(); } } #ifdef VK_USE_PLATFORM_ANDROID_KHR #include "android_ndk_types.h" TEST_F(VkLayerTest, AndroidHardwareBufferImageCreate) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer image create info."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkImage img = VK_NULL_HANDLE; auto reset_img = [&img, dev]() { if (VK_NULL_HANDLE != img) vkDestroyImage(dev, img, NULL); img = VK_NULL_HANDLE; }; VkImageCreateInfo ici = {}; ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ici.pNext = nullptr; ici.imageType = VK_IMAGE_TYPE_2D; ici.arrayLayers = 1; ici.extent = {64, 64, 1}; ici.format = VK_FORMAT_UNDEFINED; ici.mipLevels = 1; ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; ici.samples = VK_SAMPLE_COUNT_1_BIT; ici.tiling = VK_IMAGE_TILING_OPTIMAL; ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; // undefined format m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-01975"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); // also undefined format VkExternalFormatANDROID efa = {}; efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; efa.externalFormat = 0; ici.pNext = &efa; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-01975"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); // undefined format with an unknown external format efa.externalFormat = 0xBADC0DE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkExternalFormatANDROID-externalFormat-01894"); 
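// 0xBADC0DE was never returned by vkGetAndroidHardwareBufferPropertiesANDROID, so the external
// format is unknown to the implementation and the image creation below should be rejected.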
vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); AHardwareBuffer *ahb; AHardwareBuffer_Desc ahb_desc = {}; ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.width = 64; ahb_desc.height = 64; ahb_desc.layers = 1; // Allocate an AHardwareBuffer AHardwareBuffer_allocate(&ahb_desc, &ahb); // Retrieve its properties to make its external format 'known' (AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM) VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {}; ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; ahb_props.pNext = &ahb_fmt_props; PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); ASSERT_TRUE(pfn_GetAHBProps != nullptr); pfn_GetAHBProps(dev, ahb, &ahb_props); // a defined image format with a non-zero external format ici.format = VK_FORMAT_R8G8B8A8_UNORM; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-01974"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); ici.format = VK_FORMAT_UNDEFINED; // external format while MUTABLE ici.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02396"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); ici.flags = 0; // external format while usage other than SAMPLED ici.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02397"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; // external format while tiling other than OPTIMAL ici.tiling = VK_IMAGE_TILING_LINEAR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02398"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); ici.tiling = VK_IMAGE_TILING_OPTIMAL; // imageType VkExternalMemoryImageCreateInfo emici = {}; emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO; emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; ici.pNext = &emici; // remove efa from chain, insert emici ici.format = VK_FORMAT_R8G8B8A8_UNORM; ici.imageType = VK_IMAGE_TYPE_3D; ici.extent = {64, 64, 64}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02393"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); // wrong mipLevels ici.imageType = VK_IMAGE_TYPE_2D; ici.extent = {64, 64, 1}; ici.mipLevels = 6; // should be 7 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-pNext-02394"); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyFound(); reset_img(); } TEST_F(VkLayerTest, AndroidHardwareBufferFetchUnboundImageInfo) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer image property retrieval while memory is unbound."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if
((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkImage img = VK_NULL_HANDLE; auto reset_img = [&img, dev]() { if (VK_NULL_HANDLE != img) vkDestroyImage(dev, img, NULL); img = VK_NULL_HANDLE; }; VkImageCreateInfo ici = {}; ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ici.pNext = nullptr; ici.imageType = VK_IMAGE_TYPE_2D; ici.arrayLayers = 1; ici.extent = {64, 64, 1}; ici.format = VK_FORMAT_R8G8B8A8_UNORM; ici.mipLevels = 1; ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; ici.samples = VK_SAMPLE_COUNT_1_BIT; ici.tiling = VK_IMAGE_TILING_LINEAR; ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; VkExternalMemoryImageCreateInfo emici = {}; emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO; emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; ici.pNext = &emici; m_errorMonitor->ExpectSuccess(); vkCreateImage(dev, &ici, NULL, &img); m_errorMonitor->VerifyNotFound(); // attempt to fetch layout from unbound image VkImageSubresource sub_rsrc = {}; sub_rsrc.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkSubresourceLayout sub_layout = {}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-image-01895"); vkGetImageSubresourceLayout(dev, img, &sub_rsrc, &sub_layout); m_errorMonitor->VerifyFound(); // attempt to get memory reqs from unbound image VkImageMemoryRequirementsInfo2 imri = {}; imri.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2; imri.image = img; VkMemoryRequirements2 mem_reqs = {}; mem_reqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryRequirementsInfo2-image-01897"); vkGetImageMemoryRequirements2(dev, &imri, &mem_reqs); m_errorMonitor->VerifyFound(); reset_img(); } TEST_F(VkLayerTest, AndroidHardwareBufferMemoryAllocation) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer memory allocation."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); 
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkImage img = VK_NULL_HANDLE; auto reset_img = [&img, dev]() { if (VK_NULL_HANDLE != img) vkDestroyImage(dev, img, NULL); img = VK_NULL_HANDLE; }; VkDeviceMemory mem_handle = VK_NULL_HANDLE; auto reset_mem = [&mem_handle, dev]() { if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL); mem_handle = VK_NULL_HANDLE; }; PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); ASSERT_TRUE(pfn_GetAHBProps != nullptr); // AHB structs AHardwareBuffer *ahb = nullptr; AHardwareBuffer_Desc ahb_desc = {}; VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {}; ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; ahb_props.pNext = &ahb_fmt_props; VkImportAndroidHardwareBufferInfoANDROID iahbi = {}; iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID; // destroy and re-acquire an AHB, and fetch it's properties auto recreate_ahb = [&ahb, &iahbi, &ahb_desc, &ahb_props, dev, pfn_GetAHBProps]() { if (ahb) AHardwareBuffer_release(ahb); ahb = nullptr; AHardwareBuffer_allocate(&ahb_desc, &ahb); pfn_GetAHBProps(dev, ahb, &ahb_props); iahbi.buffer = ahb; }; // Allocate an AHardwareBuffer ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.width = 64; ahb_desc.height = 64; ahb_desc.layers = 1; recreate_ahb(); // Create an image w/ external format VkExternalFormatANDROID efa = {}; efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; VkImageCreateInfo ici = {}; ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ici.pNext = &efa; ici.imageType = VK_IMAGE_TYPE_2D; ici.arrayLayers = 1; ici.extent = {64, 64, 1}; ici.format = VK_FORMAT_UNDEFINED; ici.mipLevels = 1; ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; ici.mipLevels = 1; ici.samples = VK_SAMPLE_COUNT_1_BIT; ici.tiling = VK_IMAGE_TILING_OPTIMAL; ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; VkResult res = vkCreateImage(dev, &ici, NULL, &img); ASSERT_VK_SUCCESS(res); VkMemoryAllocateInfo mai = {}; mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mai.pNext = &iahbi; // Chained import struct mai.allocationSize = ahb_props.allocationSize; mai.memoryTypeIndex = 32; // Set index to match one of the bits in ahb_props for (int i = 0; i < 32; i++) { if (ahb_props.memoryTypeBits & (1 << i)) { mai.memoryTypeIndex = i; break; } } ASSERT_NE(32, mai.memoryTypeIndex); // Import w/ non-dedicated memory allocation // Import requires format AHB_FMT_BLOB and usage AHB_USAGE_GPU_DATA_BUFFER 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02384"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Allocation size mismatch ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; recreate_ahb(); mai.allocationSize = ahb_props.allocationSize + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-allocationSize-02383"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); mai.allocationSize = ahb_props.allocationSize; reset_mem(); // memoryTypeIndex mismatch mai.memoryTypeIndex++; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); mai.memoryTypeIndex--; reset_mem(); // Insert dedicated image memory allocation to mai chain VkMemoryDedicatedAllocateInfo mdai = {}; mdai.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO; mdai.image = img; mdai.buffer = VK_NULL_HANDLE; mdai.pNext = mai.pNext; mai.pNext = &mdai; // Dedicated allocation with unmatched usage bits ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02390"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Dedicated allocation with incomplete mip chain reset_img(); ici.mipLevels = 2; vkCreateImage(dev, &ici, NULL, &img); mdai.image = img; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02389"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Dedicated allocation with mis-matched dimension ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.height = 32; ahb_desc.width = 128; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02388"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Dedicated allocation with mis-matched VkFormat ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.height = 64; ahb_desc.width = 64; recreate_ahb(); ici.mipLevels = 1; ici.format = VK_FORMAT_B8G8R8A8_UNORM; ici.pNext = NULL; VkImage img2; vkCreateImage(dev, &ici, NULL, &img2); mdai.image = img2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02387"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); vkDestroyImage(dev, img2, NULL); mdai.image = img; reset_mem(); // Missing required ahb usage ahb_desc.usage = AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884"); recreate_ahb(); m_errorMonitor->VerifyFound(); // Dedicated allocation with missing usage bits // Setting up this test also triggers a slew of others m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02390"); 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-allocationSize-02383"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02386"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); // Non-import allocation - replace import struct in chain with export struct VkExportMemoryAllocateInfo emai = {}; emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO; emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; mai.pNext = &emai; emai.pNext = &mdai; // still dedicated mdai.pNext = nullptr; // Export with allocation size non-zero ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; recreate_ahb(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-01874"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); reset_mem(); AHardwareBuffer_release(ahb); reset_mem(); reset_img(); } TEST_F(VkLayerTest, AndroidHardwareBufferCreateYCbCrSampler) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer YCbCr sampler creation."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE; VkSamplerYcbcrConversionCreateInfo sycci = {}; sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO; sycci.format = VK_FORMAT_UNDEFINED; sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904"); vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); m_errorMonitor->VerifyFound(); VkExternalFormatANDROID efa = {}; efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; sycci.format = VK_FORMAT_R8G8B8A8_UNORM; sycci.pNext = &efa; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904"); 
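// When a VkExternalFormatANDROID struct is chained, format is expected to be VK_FORMAT_UNDEFINED;
// leaving it as R8G8B8A8_UNORM here should trigger the same format-01904 check.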
vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AndroidHardwareBufferPhysDevImageFormatProp2) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer GetPhysicalDeviceImageFormatProperties."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping test\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); if ((m_instance_api_version < VK_API_VERSION_1_1) && !InstanceExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { printf("%s %s extension not supported, skipping test\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } VkImageFormatProperties2 ifp = {}; ifp.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2; VkPhysicalDeviceImageFormatInfo2 pdifi = {}; pdifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2; pdifi.format = VK_FORMAT_R8G8B8A8_UNORM; pdifi.tiling = VK_IMAGE_TILING_OPTIMAL; pdifi.type = VK_IMAGE_TYPE_2D; pdifi.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; VkAndroidHardwareBufferUsageANDROID ahbu = {}; ahbu.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID; ahbu.androidHardwareBufferUsage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ifp.pNext = &ahbu; // AHB_usage chained to input without a matching external image format struc chained to output m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868"); vkGetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp); m_errorMonitor->VerifyFound(); // output struct chained, but does not include VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID usage VkPhysicalDeviceExternalImageFormatInfo pdeifi = {}; pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO; pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; pdifi.pNext = &pdeifi; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868"); vkGetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AndroidHardwareBufferCreateImageView) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer image view creation."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, 
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); // Expect no validation errors during setup m_errorMonitor->ExpectSuccess(); // Allocate an AHB and fetch its properties AHardwareBuffer *ahb = nullptr; AHardwareBuffer_Desc ahb_desc = {}; ahb_desc.format = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; ahb_desc.width = 64; ahb_desc.height = 64; ahb_desc.layers = 1; AHardwareBuffer_allocate(&ahb_desc, &ahb); // Retrieve AHB properties to make it's external format 'known' VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {}; ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; ahb_props.pNext = &ahb_fmt_props; PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); ASSERT_TRUE(pfn_GetAHBProps != nullptr); pfn_GetAHBProps(dev, ahb, &ahb_props); AHardwareBuffer_release(ahb); // Give image an external format VkExternalFormatANDROID efa = {}; efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM; // Create the image VkImage img = VK_NULL_HANDLE; VkImageCreateInfo ici = {}; ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ici.pNext = &efa; ici.imageType = VK_IMAGE_TYPE_2D; ici.arrayLayers = 1; ici.extent = {64, 64, 1}; ici.format = VK_FORMAT_UNDEFINED; ici.mipLevels = 1; ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; ici.samples = VK_SAMPLE_COUNT_1_BIT; ici.tiling = VK_IMAGE_TILING_OPTIMAL; ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT; vkCreateImage(dev, &ici, NULL, &img); // Set up memory allocation VkDeviceMemory img_mem = VK_NULL_HANDLE; VkMemoryAllocateInfo mai = {}; mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mai.allocationSize = 64 * 64 * 4; mai.memoryTypeIndex = 0; vkAllocateMemory(dev, &mai, NULL, &img_mem); // Bind image to memory vkBindImageMemory(dev, img, img_mem, 0); // Create a YCbCr conversion, with different external format, chain to view VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE; VkSamplerYcbcrConversionCreateInfo sycci = {}; efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM; sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO; sycci.pNext = &efa; sycci.format = VK_FORMAT_UNDEFINED; sycci.ycbcrModel = 
VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL; vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); VkSamplerYcbcrConversionInfo syci = {}; syci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO; syci.conversion = ycbcr_conv; // Create a view VkImageView image_view = VK_NULL_HANDLE; VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.pNext = &syci; ivci.image = img; ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_UNDEFINED; ivci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; auto reset_view = [&image_view, dev]() { if (VK_NULL_HANDLE != image_view) vkDestroyImageView(dev, image_view, NULL); image_view = VK_NULL_HANDLE; }; // Up to this point, no errors expected m_errorMonitor->VerifyNotFound(); // Chained ycbcr conversion has different (external) format than image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02400"); // Also causes "unsupported format" - should be removed in future spec update m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-None-02273"); vkCreateImageView(dev, &ivci, NULL, &image_view); m_errorMonitor->VerifyFound(); reset_view(); vkDestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL); efa.externalFormat = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM; vkCreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv); syci.conversion = ycbcr_conv; // View component swizzle not IDENTITY ivci.components.r = VK_COMPONENT_SWIZZLE_B; ivci.components.b = VK_COMPONENT_SWIZZLE_R; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02401"); // Also causes "unsupported format" - should be removed in future spec update m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-None-02273"); vkCreateImageView(dev, &ivci, NULL, &image_view); m_errorMonitor->VerifyFound(); reset_view(); ivci.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; ivci.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; // View with external format, when format is not UNDEFINED ivci.format = VK_FORMAT_R5G6B5_UNORM_PACK16; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-02399"); // Also causes "view format different from image format" m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01019"); vkCreateImageView(dev, &ivci, NULL, &image_view); m_errorMonitor->VerifyFound(); reset_view(); vkDestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL); vkDestroyImageView(dev, image_view, NULL); vkDestroyImage(dev, img, NULL); vkFreeMemory(dev, img_mem, NULL); } TEST_F(VkLayerTest, AndroidHardwareBufferImportBuffer) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer import as buffer."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); 
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME); } else { printf("%s %s extension not supported, skipping tests\n", kSkipPrefix, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDevice dev = m_device->device(); VkDeviceMemory mem_handle = VK_NULL_HANDLE; auto reset_mem = [&mem_handle, dev]() { if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL); mem_handle = VK_NULL_HANDLE; }; PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vkGetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); ASSERT_TRUE(pfn_GetAHBProps != nullptr); // AHB structs AHardwareBuffer *ahb = nullptr; AHardwareBuffer_Desc ahb_desc = {}; VkAndroidHardwareBufferPropertiesANDROID ahb_props = {}; ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID; VkImportAndroidHardwareBufferInfoANDROID iahbi = {}; iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID; // Allocate an AHardwareBuffer ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB; ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE; ahb_desc.width = 512; ahb_desc.height = 1; ahb_desc.layers = 1; AHardwareBuffer_allocate(&ahb_desc, &ahb); pfn_GetAHBProps(dev, ahb, &ahb_props); iahbi.buffer = ahb; // Create export and import buffers VkExternalMemoryBufferCreateInfo ext_buf_info = {}; ext_buf_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR; ext_buf_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT; VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.pNext = &ext_buf_info; bci.size = ahb_props.allocationSize; bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VkBuffer buf = VK_NULL_HANDLE; vkCreateBuffer(dev, &bci, NULL, &buf); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(dev, buf, &mem_reqs); // Allocation info VkMemoryAllocateInfo mai = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, mem_reqs, 0); mai.pNext = &iahbi; // Chained import struct // Import as buffer requires format AHB_FMT_BLOB and usage AHB_USAGE_GPU_DATA_BUFFER m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881"); // Also causes "non-dedicated allocation format/usage" error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMemoryAllocateInfo-pNext-02384"); vkAllocateMemory(dev, &mai, NULL, &mem_handle); m_errorMonitor->VerifyFound(); AHardwareBuffer_release(ahb); reset_mem(); vkDestroyBuffer(dev, buf, NULL); } TEST_F(VkLayerTest, AndroidHardwareBufferExporttBuffer) { TEST_DESCRIPTION("Verify AndroidHardwareBuffer export memory as AHB."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) && // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) { 
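// Request the AHB extension together with the extensions it depends on.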
TEST_F(VkLayerTest, AndroidHardwareBufferExportBuffer) {
    TEST_DESCRIPTION("Verify AndroidHardwareBuffer export memory as AHB.");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
        // Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
        (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
        m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
    } else {
        printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
               VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState());
    VkDevice dev = m_device->device();

    VkDeviceMemory mem_handle = VK_NULL_HANDLE;

    // Allocate device memory, no linked export struct indicating AHB handle type
    VkMemoryAllocateInfo mai = {};
    mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mai.allocationSize = 65536;
    mai.memoryTypeIndex = 0;
    vkAllocateMemory(dev, &mai, NULL, &mem_handle);

    PFN_vkGetMemoryAndroidHardwareBufferANDROID pfn_GetMemAHB =
        (PFN_vkGetMemoryAndroidHardwareBufferANDROID)vkGetDeviceProcAddr(dev, "vkGetMemoryAndroidHardwareBufferANDROID");
    ASSERT_TRUE(pfn_GetMemAHB != nullptr);

    VkMemoryGetAndroidHardwareBufferInfoANDROID mgahbi = {};
    mgahbi.sType = VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
    mgahbi.memory = mem_handle;
    AHardwareBuffer *ahb = nullptr;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882");
    pfn_GetMemAHB(dev, &mgahbi, &ahb);
    m_errorMonitor->VerifyFound();

    if (ahb) AHardwareBuffer_release(ahb);
    ahb = nullptr;
    if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL);
    mem_handle = VK_NULL_HANDLE;

    // Add an export struct with AHB handle type to allocation info
    VkExportMemoryAllocateInfo emai = {};
    emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
    emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
    mai.pNext = &emai;

    // Create an image, do not bind memory
    VkImage img = VK_NULL_HANDLE;
    VkImageCreateInfo ici = {};
    ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ici.imageType = VK_IMAGE_TYPE_2D;
    ici.arrayLayers = 1;
    ici.extent = {128, 128, 1};
    ici.format = VK_FORMAT_R8G8B8A8_UNORM;
    ici.mipLevels = 1;
    ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    ici.samples = VK_SAMPLE_COUNT_1_BIT;
    ici.tiling = VK_IMAGE_TILING_OPTIMAL;
    ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    vkCreateImage(dev, &ici, NULL, &img);
    ASSERT_TRUE(VK_NULL_HANDLE != img);

    // Add image to allocation chain as dedicated info, re-allocate
    VkMemoryDedicatedAllocateInfo mdai = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO};
    mdai.image = img;
    emai.pNext = &mdai;
    mai.allocationSize = 0;
    vkAllocateMemory(dev, &mai, NULL, &mem_handle);

    mgahbi.memory = mem_handle;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883");
    pfn_GetMemAHB(dev, &mgahbi, &ahb);
    m_errorMonitor->VerifyFound();

    if (ahb) AHardwareBuffer_release(ahb);
    if (VK_NULL_HANDLE != mem_handle) vkFreeMemory(dev, mem_handle, NULL);
    vkDestroyImage(dev, img, NULL);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR

TEST_F(VkLayerTest, ViewportSwizzleNV) {
    TEST_DESCRIPTION("Verify VK_NV_viewport_swizzle.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    if (DeviceExtensionSupported(gpu(), nullptr,
                                 VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME);
    } else {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkViewportSwizzleNV invalid_swizzles = {
        VkViewportCoordinateSwizzleNV(-1),
        VkViewportCoordinateSwizzleNV(-1),
        VkViewportCoordinateSwizzleNV(-1),
        VkViewportCoordinateSwizzleNV(-1),
    };

    VkPipelineViewportSwizzleStateCreateInfoNV vp_swizzle_state = {
        VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV};
    vp_swizzle_state.viewportCount = 1;
    vp_swizzle_state.pViewportSwizzles = &invalid_swizzles;

    const std::vector<const char *> expected_vuids = {"VUID-VkViewportSwizzleNV-x-parameter", "VUID-VkViewportSwizzleNV-y-parameter",
                                                      "VUID-VkViewportSwizzleNV-z-parameter", "VUID-VkViewportSwizzleNV-w-parameter"};

    auto break_swizzles = [&vp_swizzle_state](CreatePipelineHelper &helper) { helper.vp_state_ci_.pNext = &vp_swizzle_state; };

    CreatePipelineHelper::OneshotTest(*this, break_swizzles, VK_DEBUG_REPORT_ERROR_BIT_EXT, expected_vuids);

    struct TestCase {
        VkBool32 rasterizerDiscardEnable;
        uint32_t vp_count;
        uint32_t swizzle_vp_count;
        bool positive;
    };

    const std::array<TestCase, 3> test_cases = {{{VK_TRUE, 1, 2, true}, {VK_FALSE, 1, 1, true}, {VK_FALSE, 1, 2, false}}};

    std::array<VkViewportSwizzleNV, 2> swizzles = {
        {{VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV,
          VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV},
         {VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV,
          VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV}}};

    for (const auto &test_case : test_cases) {
        assert(test_case.vp_count <= swizzles.size());

        vp_swizzle_state.viewportCount = test_case.swizzle_vp_count;
        vp_swizzle_state.pViewportSwizzles = swizzles.data();

        auto break_vp_count = [&vp_swizzle_state, &test_case](CreatePipelineHelper &helper) {
            helper.rs_state_ci_.rasterizerDiscardEnable = test_case.rasterizerDiscardEnable;
            helper.vp_state_ci_.viewportCount = test_case.vp_count;
            helper.vp_state_ci_.pNext = &vp_swizzle_state;
        };

        CreatePipelineHelper::OneshotTest(*this, break_vp_count, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                          "VUID-VkPipelineViewportSwizzleStateCreateInfoNV-viewportCount-01215",
                                          test_case.positive);
    }
}
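// For reference, a swizzle state that satisfies the checks exercised above (a sketch, not executed):
// every member is a valid VkViewportCoordinateSwizzleNV value and viewportCount matches the pipeline
// viewport state's viewportCount whenever rasterization is enabled.
#if 0
    VkViewportSwizzleNV identity_swizzle = {VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV,
                                            VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV,
                                            VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV,
                                            VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV};
    VkPipelineViewportSwizzleStateCreateInfoNV good_swizzle_state = {
        VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV};
    good_swizzle_state.viewportCount = 1;  // == VkPipelineViewportStateCreateInfo::viewportCount
    good_swizzle_state.pViewportSwizzles = &identity_swizzle;
    // Chain via VkPipelineViewportStateCreateInfo::pNext when building the pipeline.
#endif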
TEST_F(VkLayerTest, BufferDeviceAddressEXT) {
    TEST_DESCRIPTION("Test VK_EXT_buffer_device_address.");

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    std::array<const char *, 1> required_device_extensions = {{VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Create a device that enables buffer_device_address
    auto buffer_device_address_features = lvl_init_struct<VkPhysicalDeviceBufferAddressFeaturesEXT>();
    auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&buffer_device_address_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
    buffer_device_address_features.bufferDeviceAddressCaptureReplay = VK_FALSE;

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT =
        (PFN_vkGetBufferDeviceAddressEXT)vkGetInstanceProcAddr(instance(), "vkGetBufferDeviceAddressEXT");

    VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
    buffer_create_info.size = sizeof(uint32_t);
    buffer_create_info.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT;
    buffer_create_info.flags = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT;
    VkBuffer buffer;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-02605");
    VkResult result = vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer);
    m_errorMonitor->VerifyFound();
    if (result == VK_SUCCESS) {
        vkDestroyBuffer(m_device->device(), buffer, NULL);
    }

    buffer_create_info.flags = 0;
    VkBufferDeviceAddressCreateInfoEXT addr_ci = {VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT};
    addr_ci.deviceAddress = 1;
    buffer_create_info.pNext = &addr_ci;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-deviceAddress-02604");
    result = vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer);
    m_errorMonitor->VerifyFound();
    if (result == VK_SUCCESS) {
        vkDestroyBuffer(m_device->device(), buffer, NULL);
    }

    buffer_create_info.pNext = nullptr;
    result = vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer);
    ASSERT_VK_SUCCESS(result);

    VkBufferDeviceAddressInfoEXT info = {VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT};
    info.buffer = buffer;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferDeviceAddressInfoEXT-buffer-02600");
    vkGetBufferDeviceAddressEXT(m_device->device(), &info);
    m_errorMonitor->VerifyFound();

    vkDestroyBuffer(m_device->device(), buffer, NULL);
}
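// The expected-success counterpart (a sketch under assumed handles, not executed): VUID 02600 above fires
// because the address is queried before any memory is bound, so the valid sequence is to create the buffer
// with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT, bind device memory, then query the address.
#if 0
    VkBuffer addr_buffer;        // assumed: created with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT
    VkDeviceMemory addr_memory;  // assumed: allocated against the buffer's memory requirements
    vkBindBufferMemory(device, addr_buffer, addr_memory, 0);

    VkBufferDeviceAddressInfoEXT addr_info = {VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT};
    addr_info.buffer = addr_buffer;
    VkDeviceAddress gpu_va = vkGetBufferDeviceAddressEXT(device, &addr_info);  // usable in shaders via GL_EXT_buffer_reference
#endif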
TEST_F(VkLayerTest, BufferDeviceAddressEXTDisabled) {
    TEST_DESCRIPTION("Test VK_EXT_buffer_device_address with the bufferDeviceAddress feature disabled.");

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    std::array<const char *, 1> required_device_extensions = {{VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Create a device that disables buffer_device_address
    auto buffer_device_address_features = lvl_init_struct<VkPhysicalDeviceBufferAddressFeaturesEXT>();
    auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&buffer_device_address_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
    buffer_device_address_features.bufferDeviceAddress = VK_FALSE;
    buffer_device_address_features.bufferDeviceAddressCaptureReplay = VK_FALSE;

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT =
        (PFN_vkGetBufferDeviceAddressEXT)vkGetInstanceProcAddr(instance(), "vkGetBufferDeviceAddressEXT");

    VkBufferCreateInfo buffer_create_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
    buffer_create_info.size = sizeof(uint32_t);
    buffer_create_info.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT;
    VkBuffer buffer;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-usage-02606");
    VkResult result = vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer);
    m_errorMonitor->VerifyFound();
    if (result == VK_SUCCESS) {
        vkDestroyBuffer(m_device->device(), buffer, NULL);
    }

    buffer_create_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
    result = vkCreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer);
    ASSERT_VK_SUCCESS(result);

    VkBufferDeviceAddressInfoEXT info = {VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT};
    info.buffer = buffer;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetBufferDeviceAddressEXT-None-02598");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferDeviceAddressInfoEXT-buffer-02601");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferDeviceAddressInfoEXT-buffer-02600");
    vkGetBufferDeviceAddressEXT(m_device->device(), &info);
    m_errorMonitor->VerifyFound();

    vkDestroyBuffer(m_device->device(), buffer, NULL);
}

#if defined(ANDROID) && defined(VALIDATION_APK)
const char *appTag = "VulkanLayerValidationTests";
static bool initialized = false;
static bool active = false;

// Convert Intents to argv
// Ported from Hologram sample, only difference is flexible key
std::vector<std::string> get_args(android_app &app, const char *intent_extra_data_key) {
    std::vector<std::string> args;
    JavaVM &vm = *app.activity->vm;
    JNIEnv *p_env;
    if (vm.AttachCurrentThread(&p_env, nullptr) != JNI_OK) return args;

    JNIEnv &env = *p_env;
    jobject activity = app.activity->clazz;
    jmethodID get_intent_method = env.GetMethodID(env.GetObjectClass(activity), "getIntent", "()Landroid/content/Intent;");
    jobject intent = env.CallObjectMethod(activity, get_intent_method);
    jmethodID get_string_extra_method =
        env.GetMethodID(env.GetObjectClass(intent), "getStringExtra", "(Ljava/lang/String;)Ljava/lang/String;");
    jvalue get_string_extra_args;
    get_string_extra_args.l = env.NewStringUTF(intent_extra_data_key);
    jstring extra_str = static_cast<jstring>(env.CallObjectMethodA(intent, get_string_extra_method, &get_string_extra_args));

    std::string args_str;
    if (extra_str) {
        const char *extra_utf = env.GetStringUTFChars(extra_str, nullptr);
        args_str = extra_utf;
        env.ReleaseStringUTFChars(extra_str, extra_utf);
        env.DeleteLocalRef(extra_str);
    }

    env.DeleteLocalRef(get_string_extra_args.l);
    env.DeleteLocalRef(intent);
    vm.DetachCurrentThread();

    // split args_str
    std::stringstream ss(args_str);
    std::string arg;
    while (std::getline(ss, arg, ' ')) {
        if (!arg.empty()) args.push_back(arg);
    }

    return args;
}

void addFullTestCommentIfPresent(const ::testing::TestInfo &test_info, std::string &error_message) {
    const char *const type_param = test_info.type_param();
    const char *const value_param = test_info.value_param();
    if (type_param != NULL || value_param != NULL) {
        error_message.append(", where ");
        if (type_param != NULL) {
            error_message.append("TypeParam = ").append(type_param);
            if (value_param != NULL) error_message.append(" and ");
        }
        if (value_param != NULL) {
            error_message.append("GetParam() = ").append(value_param);
        }
    }
}

// Inspired by https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md
class LogcatPrinter : public ::testing::EmptyTestEventListener {
    // Called before a test starts.
    virtual void OnTestStart(const ::testing::TestInfo &test_info) {
        __android_log_print(ANDROID_LOG_INFO, appTag, "[ RUN ] %s.%s", test_info.test_case_name(), test_info.name());
    }

    // Called after a failed assertion or a SUCCEED() invocation.
    virtual void OnTestPartResult(const ::testing::TestPartResult &result) {
        // If the test part succeeded, we don't need to do anything.
        if (result.type() == ::testing::TestPartResult::kSuccess) return;

        __android_log_print(ANDROID_LOG_INFO, appTag, "%s in %s:%d %s", result.failed() ? "*** Failure" : "Success",
                            result.file_name(), result.line_number(), result.summary());
    }

    // Called after a test ends.
    virtual void OnTestEnd(const ::testing::TestInfo &info) {
        std::string result;
        if (info.result()->Passed()) {
            result.append("[ OK ]");
        } else {
            result.append("[ FAILED ]");
        }
        result.append(info.test_case_name()).append(".").append(info.name());
        if (info.result()->Failed()) addFullTestCommentIfPresent(info, result);

        if (::testing::GTEST_FLAG(print_time)) {
            std::ostringstream os;
            os << info.result()->elapsed_time();
            result.append(" (").append(os.str()).append(" ms)");
        }

        __android_log_print(ANDROID_LOG_INFO, appTag, "%s", result.c_str());
    }
};

static int32_t processInput(struct android_app *app, AInputEvent *event) { return 0; }

static void processCommand(struct android_app *app, int32_t cmd) {
    switch (cmd) {
        case APP_CMD_INIT_WINDOW: {
            if (app->window) {
                initialized = true;
            }
            break;
        }
        case APP_CMD_GAINED_FOCUS: {
            active = true;
            break;
        }
        case APP_CMD_LOST_FOCUS: {
            active = false;
            break;
        }
    }
}

void android_main(struct android_app *app) {
    int vulkanSupport = InitVulkan();
    if (vulkanSupport == 0) {
        __android_log_print(ANDROID_LOG_INFO, appTag, "==== FAILED ==== No Vulkan support found");
        return;
    }

    app->onAppCmd = processCommand;
    app->onInputEvent = processInput;

    while (1) {
        int events;
        struct android_poll_source *source;
        while (ALooper_pollAll(active ? 0 : -1, NULL, &events, (void **)&source) >= 0) {
            if (source) {
                source->process(app, source);
            }

            if (app->destroyRequested != 0) {
                VkTestFramework::Finish();
                return;
            }
        }

        if (initialized && active) {
            // Use the following key to send arguments to gtest, i.e.
            // --es args "--gtest_filter=-VkLayerTest.foo"
            const char key[] = "args";
            std::vector<std::string> args = get_args(*app, key);

            std::string filter = "";
            if (args.size() > 0) {
                __android_log_print(ANDROID_LOG_INFO, appTag, "Intent args = %s", args[0].c_str());
                filter += args[0];
            } else {
                __android_log_print(ANDROID_LOG_INFO, appTag, "No Intent args detected");
            }

            int argc = 2;
            char *argv[] = {(char *)"foo", (char *)filter.c_str()};
            __android_log_print(ANDROID_LOG_DEBUG, appTag, "filter = %s", argv[1]);

            // Route output to files until we can override the gtest output
            freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/out.txt", "w", stdout);
            freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/err.txt", "w", stderr);

            ::testing::InitGoogleTest(&argc, argv);
            ::testing::TestEventListeners &listeners = ::testing::UnitTest::GetInstance()->listeners();
            listeners.Append(new LogcatPrinter);

            VkTestFramework::InitArgs(&argc, argv);
            ::testing::AddGlobalTestEnvironment(new TestEnvironment);

            int result = RUN_ALL_TESTS();

            if (result != 0) {
                __android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests FAILED ====");
            } else {
                __android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests PASSED ====");
            }

            VkTestFramework::Finish();

            fclose(stdout);
            fclose(stderr);

            ANativeActivity_finish(app->activity);
            return;
        }
    }
}
#endif

#if defined(_WIN32) && !defined(NDEBUG)
#include <crtdbg.h>
#endif

int main(int argc, char **argv) {
    int result;

#ifdef ANDROID
    int vulkanSupport = InitVulkan();
    if (vulkanSupport == 0) return 1;
#endif

#if defined(_WIN32) && !defined(NDEBUG)
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
#endif

    ::testing::InitGoogleTest(&argc, argv);
    VkTestFramework::InitArgs(&argc, argv);

    ::testing::AddGlobalTestEnvironment(new TestEnvironment);

    result = RUN_ALL_TESTS();

    VkTestFramework::Finish();
    return result;
}
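// Usage note (an assumption shown for reference, not exercised by the code above): on Android the harness
// reads the "args" Intent extra and forwards it to gtest, so a filter can be supplied from the host roughly as:
//   adb shell am start -n com.example.VulkanLayerValidationTests/android.app.NativeActivity \
//       --es args "--gtest_filter=-VkLayerTest.foo"
// Test output is then collected from out.txt / err.txt in the app's files directory (see the freopen calls in
// android_main above). The exact activity component name depends on how the APK is packaged.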