1#!/usr/bin/python3 -i 2# 3# Copyright (c) 2015-2019 The Khronos Group Inc. 4# Copyright (c) 2015-2019 Valve Corporation 5# Copyright (c) 2015-2019 LunarG, Inc. 6# Copyright (c) 2015-2019 Google Inc. 7# 8# Licensed under the Apache License, Version 2.0 (the "License"); 9# you may not use this file except in compliance with the License. 10# You may obtain a copy of the License at 11# 12# http://www.apache.org/licenses/LICENSE-2.0 13# 14# Unless required by applicable law or agreed to in writing, software 15# distributed under the License is distributed on an "AS IS" BASIS, 16# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17# See the License for the specific language governing permissions and 18# limitations under the License. 19# 20# Author: Tobin Ehlis <tobine@google.com> 21# Author: Mark Lobodzinski <mark@lunarg.com> 22 23import os,re,sys 24import xml.etree.ElementTree as etree 25from generator import * 26from collections import namedtuple 27from common_codegen import * 28 29# LayerChassisDispatchGeneratorOptions - subclass of GeneratorOptions. 30# 31# Adds options used by LayerChassisDispatchOutputGenerator objects during 32# layer chassis dispatch file generation. 33# 34# Additional members 35# prefixText - list of strings to prefix generated header with 36# (usually a copyright statement + calling convention macros). 37# protectFile - True if multiple inclusion protection should be 38# generated (based on the filename) around the entire header. 39# protectFeature - True if #ifndef..#endif protection should be 40# generated around a feature interface in the header file. 41# genFuncPointers - True if function pointer typedefs should be 42# generated 43# protectProto - If conditional protection should be generated 44# around prototype declarations, set to either '#ifdef' 45# to require opt-in (#ifdef protectProtoStr) or '#ifndef' 46# to require opt-out (#ifndef protectProtoStr). Otherwise 47# set to None. 48# protectProtoStr - #ifdef/#ifndef symbol to use around prototype 49# declarations, if protectProto is set 50# apicall - string to use for the function declaration prefix, 51# such as APICALL on Windows. 52# apientry - string to use for the calling convention macro, 53# in typedefs, such as APIENTRY. 54# apientryp - string to use for the calling convention macro 55# in function pointer typedefs, such as APIENTRYP. 56# indentFuncProto - True if prototype declarations should put each 57# parameter on a separate line 58# indentFuncPointer - True if typedefed function pointers should put each 59# parameter on a separate line 60# alignFuncParam - if nonzero and parameters are being put on a 61# separate line, align parameter names at the specified column 62class LayerChassisDispatchGeneratorOptions(GeneratorOptions): 63 def __init__(self, 64 filename = None, 65 directory = '.', 66 apiname = None, 67 profile = None, 68 versions = '.*', 69 emitversions = '.*', 70 defaultExtensions = None, 71 addExtensions = None, 72 removeExtensions = None, 73 emitExtensions = None, 74 sortProcedure = regSortFeatures, 75 prefixText = "", 76 genFuncPointers = True, 77 protectFile = True, 78 protectFeature = True, 79 apicall = '', 80 apientry = '', 81 apientryp = '', 82 indentFuncProto = True, 83 indentFuncPointer = False, 84 alignFuncParam = 0, 85 expandEnumerants = True): 86 GeneratorOptions.__init__(self, filename, directory, apiname, profile, 87 versions, emitversions, defaultExtensions, 88 addExtensions, removeExtensions, emitExtensions, sortProcedure) 89 self.prefixText = prefixText 90 self.genFuncPointers = genFuncPointers 91 self.protectFile = protectFile 92 self.protectFeature = protectFeature 93 self.apicall = apicall 94 self.apientry = apientry 95 self.apientryp = apientryp 96 self.indentFuncProto = indentFuncProto 97 self.indentFuncPointer = indentFuncPointer 98 self.alignFuncParam = alignFuncParam 99 self.expandEnumerants = expandEnumerants 100 101 102# LayerChassisDispatchOutputGenerator - subclass of OutputGenerator. 103# Generates layer chassis non-dispatchable handle-wrapping code. 104# 105# ---- methods ---- 106# LayerChassisDispatchOutputGenerator(errFile, warnFile, diagFile) - args as for OutputGenerator. Defines additional internal state. 107# ---- methods overriding base class ---- 108# beginFile(genOpts) 109# endFile() 110# beginFeature(interface, emit) 111# endFeature() 112# genCmd(cmdinfo) 113# genStruct() 114# genType() 115class LayerChassisDispatchOutputGenerator(OutputGenerator): 116 """Generate layer chassis handle wrapping code based on XML element attributes""" 117 inline_copyright_message = """ 118// This file is ***GENERATED***. Do Not Edit. 119// See layer_chassis_dispatch_generator.py for modifications. 120 121/* Copyright (c) 2015-2019 The Khronos Group Inc. 122 * Copyright (c) 2015-2019 Valve Corporation 123 * Copyright (c) 2015-2019 LunarG, Inc. 124 * Copyright (c) 2015-2019 Google Inc. 125 * 126 * Licensed under the Apache License, Version 2.0 (the "License"); 127 * you may not use this file except in compliance with the License. 128 * You may obtain a copy of the License at 129 * 130 * http://www.apache.org/licenses/LICENSE-2.0 131 * 132 * Unless required by applicable law or agreed to in writing, software 133 * distributed under the License is distributed on an "AS IS" BASIS, 134 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 135 * See the License for the specific language governing permissions and 136 * limitations under the License. 137 * 138 * Author: Mark Lobodzinski <mark@lunarg.com> 139 */""" 140 141 inline_custom_source_preamble = """ 142VkResult DispatchCreateComputePipelines(ValidationObject *layer_data, 143 VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, 144 const VkComputePipelineCreateInfo *pCreateInfos, 145 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) { 146 if (!wrap_handles) return layer_data->device_dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount, 147 pCreateInfos, pAllocator, pPipelines); 148 safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL; 149 if (pCreateInfos) { 150 std::lock_guard<std::mutex> lock(dispatch_lock); 151 local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount]; 152 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) { 153 local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]); 154 if (pCreateInfos[idx0].basePipelineHandle) { 155 local_pCreateInfos[idx0].basePipelineHandle = layer_data->Unwrap(pCreateInfos[idx0].basePipelineHandle); 156 } 157 if (pCreateInfos[idx0].layout) { 158 local_pCreateInfos[idx0].layout = layer_data->Unwrap(pCreateInfos[idx0].layout); 159 } 160 if (pCreateInfos[idx0].stage.module) { 161 local_pCreateInfos[idx0].stage.module = layer_data->Unwrap(pCreateInfos[idx0].stage.module); 162 } 163 } 164 } 165 if (pipelineCache) { 166 std::lock_guard<std::mutex> lock(dispatch_lock); 167 pipelineCache = layer_data->Unwrap(pipelineCache); 168 } 169 170 VkResult result = layer_data->device_dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount, 171 local_pCreateInfos->ptr(), pAllocator, pPipelines); 172 delete[] local_pCreateInfos; 173 { 174 std::lock_guard<std::mutex> lock(dispatch_lock); 175 for (uint32_t i = 0; i < createInfoCount; ++i) { 176 if (pPipelines[i] != VK_NULL_HANDLE) { 177 pPipelines[i] = layer_data->WrapNew(pPipelines[i]); 178 } 179 } 180 } 181 return result; 182} 183 184VkResult DispatchCreateGraphicsPipelines(ValidationObject *layer_data, 185 VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, 186 const VkGraphicsPipelineCreateInfo *pCreateInfos, 187 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) { 188 if (!wrap_handles) return layer_data->device_dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount, 189 pCreateInfos, pAllocator, pPipelines); 190 safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = nullptr; 191 if (pCreateInfos) { 192 local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount]; 193 std::lock_guard<std::mutex> lock(dispatch_lock); 194 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) { 195 bool uses_color_attachment = false; 196 bool uses_depthstencil_attachment = false; 197 { 198 const auto subpasses_uses_it = layer_data->renderpasses_states.find(layer_data->Unwrap(pCreateInfos[idx0].renderPass)); 199 if (subpasses_uses_it != layer_data->renderpasses_states.end()) { 200 const auto &subpasses_uses = subpasses_uses_it->second; 201 if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[idx0].subpass)) 202 uses_color_attachment = true; 203 if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[idx0].subpass)) 204 uses_depthstencil_attachment = true; 205 } 206 } 207 208 local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0], uses_color_attachment, uses_depthstencil_attachment); 209 210 if (pCreateInfos[idx0].basePipelineHandle) { 211 local_pCreateInfos[idx0].basePipelineHandle = layer_data->Unwrap(pCreateInfos[idx0].basePipelineHandle); 212 } 213 if (pCreateInfos[idx0].layout) { 214 local_pCreateInfos[idx0].layout = layer_data->Unwrap(pCreateInfos[idx0].layout); 215 } 216 if (pCreateInfos[idx0].pStages) { 217 for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) { 218 if (pCreateInfos[idx0].pStages[idx1].module) { 219 local_pCreateInfos[idx0].pStages[idx1].module = layer_data->Unwrap(pCreateInfos[idx0].pStages[idx1].module); 220 } 221 } 222 } 223 if (pCreateInfos[idx0].renderPass) { 224 local_pCreateInfos[idx0].renderPass = layer_data->Unwrap(pCreateInfos[idx0].renderPass); 225 } 226 } 227 } 228 if (pipelineCache) { 229 std::lock_guard<std::mutex> lock(dispatch_lock); 230 pipelineCache = layer_data->Unwrap(pipelineCache); 231 } 232 233 VkResult result = layer_data->device_dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount, 234 local_pCreateInfos->ptr(), pAllocator, pPipelines); 235 delete[] local_pCreateInfos; 236 { 237 std::lock_guard<std::mutex> lock(dispatch_lock); 238 for (uint32_t i = 0; i < createInfoCount; ++i) { 239 if (pPipelines[i] != VK_NULL_HANDLE) { 240 pPipelines[i] = layer_data->WrapNew(pPipelines[i]); 241 } 242 } 243 } 244 return result; 245} 246 247template <typename T> 248static void UpdateCreateRenderPassState(ValidationObject *layer_data, const T *pCreateInfo, VkRenderPass renderPass) { 249 auto &renderpass_state = layer_data->renderpasses_states[renderPass]; 250 251 for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) { 252 bool uses_color = false; 253 for (uint32_t i = 0; i < pCreateInfo->pSubpasses[subpass].colorAttachmentCount && !uses_color; ++i) 254 if (pCreateInfo->pSubpasses[subpass].pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) uses_color = true; 255 256 bool uses_depthstencil = false; 257 if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment) 258 if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) 259 uses_depthstencil = true; 260 261 if (uses_color) renderpass_state.subpasses_using_color_attachment.insert(subpass); 262 if (uses_depthstencil) renderpass_state.subpasses_using_depthstencil_attachment.insert(subpass); 263 } 264} 265 266VkResult DispatchCreateRenderPass(ValidationObject *layer_data, 267 VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, 268 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { 269 VkResult result = layer_data->device_dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); 270 if (!wrap_handles) return result; 271 if (VK_SUCCESS == result) { 272 std::lock_guard<std::mutex> lock(dispatch_lock); 273 UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass); 274 *pRenderPass = layer_data->WrapNew(*pRenderPass); 275 } 276 return result; 277} 278 279VkResult DispatchCreateRenderPass2KHR(ValidationObject *layer_data, 280 VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo, 281 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { 282 VkResult result = layer_data->device_dispatch_table.CreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass); 283 if (!wrap_handles) return result; 284 if (VK_SUCCESS == result) { 285 std::lock_guard<std::mutex> lock(dispatch_lock); 286 UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass); 287 *pRenderPass = layer_data->WrapNew(*pRenderPass); 288 } 289 return result; 290} 291 292void DispatchDestroyRenderPass(ValidationObject *layer_data, 293 VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { 294 if (!wrap_handles) return layer_data->device_dispatch_table.DestroyRenderPass(device, renderPass, pAllocator); 295 std::unique_lock<std::mutex> lock(dispatch_lock); 296 uint64_t renderPass_id = reinterpret_cast<uint64_t &>(renderPass); 297 renderPass = (VkRenderPass)unique_id_mapping[renderPass_id]; 298 unique_id_mapping.erase(renderPass_id); 299 lock.unlock(); 300 layer_data->device_dispatch_table.DestroyRenderPass(device, renderPass, pAllocator); 301 302 lock.lock(); 303 layer_data->renderpasses_states.erase(renderPass); 304} 305 306VkResult DispatchCreateSwapchainKHR(ValidationObject *layer_data, 307 VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, 308 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) { 309 if (!wrap_handles) return layer_data->device_dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain); 310 safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL; 311 if (pCreateInfo) { 312 std::lock_guard<std::mutex> lock(dispatch_lock); 313 local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo); 314 local_pCreateInfo->oldSwapchain = layer_data->Unwrap(pCreateInfo->oldSwapchain); 315 // Surface is instance-level object 316 local_pCreateInfo->surface = layer_data->Unwrap(pCreateInfo->surface); 317 } 318 319 VkResult result = layer_data->device_dispatch_table.CreateSwapchainKHR(device, local_pCreateInfo->ptr(), pAllocator, pSwapchain); 320 delete local_pCreateInfo; 321 322 if (VK_SUCCESS == result) { 323 std::lock_guard<std::mutex> lock(dispatch_lock); 324 *pSwapchain = layer_data->WrapNew(*pSwapchain); 325 } 326 return result; 327} 328 329VkResult DispatchCreateSharedSwapchainsKHR(ValidationObject *layer_data, 330 VkDevice device, uint32_t swapchainCount, 331 const VkSwapchainCreateInfoKHR *pCreateInfos, 332 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) { 333 if (!wrap_handles) return layer_data->device_dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, 334 pAllocator, pSwapchains); 335 safe_VkSwapchainCreateInfoKHR *local_pCreateInfos = NULL; 336 { 337 std::lock_guard<std::mutex> lock(dispatch_lock); 338 if (pCreateInfos) { 339 local_pCreateInfos = new safe_VkSwapchainCreateInfoKHR[swapchainCount]; 340 for (uint32_t i = 0; i < swapchainCount; ++i) { 341 local_pCreateInfos[i].initialize(&pCreateInfos[i]); 342 if (pCreateInfos[i].surface) { 343 // Surface is instance-level object 344 local_pCreateInfos[i].surface = layer_data->Unwrap(pCreateInfos[i].surface); 345 } 346 if (pCreateInfos[i].oldSwapchain) { 347 local_pCreateInfos[i].oldSwapchain = layer_data->Unwrap(pCreateInfos[i].oldSwapchain); 348 } 349 } 350 } 351 } 352 VkResult result = layer_data->device_dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, local_pCreateInfos->ptr(), 353 pAllocator, pSwapchains); 354 delete[] local_pCreateInfos; 355 if (VK_SUCCESS == result) { 356 std::lock_guard<std::mutex> lock(dispatch_lock); 357 for (uint32_t i = 0; i < swapchainCount; i++) { 358 pSwapchains[i] = layer_data->WrapNew(pSwapchains[i]); 359 } 360 } 361 return result; 362} 363 364VkResult DispatchGetSwapchainImagesKHR(ValidationObject *layer_data, 365 VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, 366 VkImage *pSwapchainImages) { 367 if (!wrap_handles) return layer_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages); 368 VkSwapchainKHR wrapped_swapchain_handle = swapchain; 369 if (VK_NULL_HANDLE != swapchain) { 370 std::lock_guard<std::mutex> lock(dispatch_lock); 371 swapchain = layer_data->Unwrap(swapchain); 372 } 373 VkResult result = 374 layer_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages); 375 if ((VK_SUCCESS == result) || (VK_INCOMPLETE == result)) { 376 if ((*pSwapchainImageCount > 0) && pSwapchainImages) { 377 std::lock_guard<std::mutex> lock(dispatch_lock); 378 auto &wrapped_swapchain_image_handles = layer_data->swapchain_wrapped_image_handle_map[wrapped_swapchain_handle]; 379 for (uint32_t i = static_cast<uint32_t>(wrapped_swapchain_image_handles.size()); i < *pSwapchainImageCount; i++) { 380 wrapped_swapchain_image_handles.emplace_back(layer_data->WrapNew(pSwapchainImages[i])); 381 } 382 for (uint32_t i = 0; i < *pSwapchainImageCount; i++) { 383 pSwapchainImages[i] = wrapped_swapchain_image_handles[i]; 384 } 385 } 386 } 387 return result; 388} 389 390void DispatchDestroySwapchainKHR(ValidationObject *layer_data, 391 VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { 392 if (!wrap_handles) return layer_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator); 393 std::unique_lock<std::mutex> lock(dispatch_lock); 394 395 auto &image_array = layer_data->swapchain_wrapped_image_handle_map[swapchain]; 396 for (auto &image_handle : image_array) { 397 unique_id_mapping.erase(HandleToUint64(image_handle)); 398 } 399 layer_data->swapchain_wrapped_image_handle_map.erase(swapchain); 400 401 uint64_t swapchain_id = HandleToUint64(swapchain); 402 swapchain = (VkSwapchainKHR)unique_id_mapping[swapchain_id]; 403 unique_id_mapping.erase(swapchain_id); 404 lock.unlock(); 405 layer_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator); 406} 407 408VkResult DispatchQueuePresentKHR(ValidationObject *layer_data, 409 VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { 410 if (!wrap_handles) return layer_data->device_dispatch_table.QueuePresentKHR(queue, pPresentInfo); 411 safe_VkPresentInfoKHR *local_pPresentInfo = NULL; 412 { 413 std::lock_guard<std::mutex> lock(dispatch_lock); 414 if (pPresentInfo) { 415 local_pPresentInfo = new safe_VkPresentInfoKHR(pPresentInfo); 416 if (local_pPresentInfo->pWaitSemaphores) { 417 for (uint32_t index1 = 0; index1 < local_pPresentInfo->waitSemaphoreCount; ++index1) { 418 local_pPresentInfo->pWaitSemaphores[index1] = layer_data->Unwrap(pPresentInfo->pWaitSemaphores[index1]); 419 } 420 } 421 if (local_pPresentInfo->pSwapchains) { 422 for (uint32_t index1 = 0; index1 < local_pPresentInfo->swapchainCount; ++index1) { 423 local_pPresentInfo->pSwapchains[index1] = layer_data->Unwrap(pPresentInfo->pSwapchains[index1]); 424 } 425 } 426 } 427 } 428 VkResult result = layer_data->device_dispatch_table.QueuePresentKHR(queue, local_pPresentInfo->ptr()); 429 430 // pResults is an output array embedded in a structure. The code generator neglects to copy back from the safe_* version, 431 // so handle it as a special case here: 432 if (pPresentInfo && pPresentInfo->pResults) { 433 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) { 434 pPresentInfo->pResults[i] = local_pPresentInfo->pResults[i]; 435 } 436 } 437 delete local_pPresentInfo; 438 return result; 439} 440 441void DispatchDestroyDescriptorPool(ValidationObject *layer_data, VkDevice device, VkDescriptorPool descriptorPool, 442 const VkAllocationCallbacks *pAllocator) { 443 if (!wrap_handles) return layer_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator); 444 std::unique_lock<std::mutex> lock(dispatch_lock); 445 446 // remove references to implicitly freed descriptor sets 447 for(auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) { 448 unique_id_mapping.erase(reinterpret_cast<uint64_t &>(descriptor_set)); 449 } 450 layer_data->pool_descriptor_sets_map.erase(descriptorPool); 451 452 uint64_t descriptorPool_id = reinterpret_cast<uint64_t &>(descriptorPool); 453 descriptorPool = (VkDescriptorPool)unique_id_mapping[descriptorPool_id]; 454 unique_id_mapping.erase(descriptorPool_id); 455 lock.unlock(); 456 layer_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator); 457} 458 459VkResult DispatchResetDescriptorPool(ValidationObject *layer_data, VkDevice device, VkDescriptorPool descriptorPool, 460 VkDescriptorPoolResetFlags flags) { 461 if (!wrap_handles) return layer_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags); 462 VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE; 463 { 464 std::lock_guard<std::mutex> lock(dispatch_lock); 465 local_descriptor_pool = layer_data->Unwrap(descriptorPool); 466 } 467 VkResult result = layer_data->device_dispatch_table.ResetDescriptorPool(device, local_descriptor_pool, flags); 468 if (VK_SUCCESS == result) { 469 std::lock_guard<std::mutex> lock(dispatch_lock); 470 // remove references to implicitly freed descriptor sets 471 for(auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) { 472 unique_id_mapping.erase(reinterpret_cast<uint64_t &>(descriptor_set)); 473 } 474 layer_data->pool_descriptor_sets_map[descriptorPool].clear(); 475 } 476 477 return result; 478} 479 480VkResult DispatchAllocateDescriptorSets(ValidationObject *layer_data, VkDevice device, 481 const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) { 482 if (!wrap_handles) return layer_data->device_dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); 483 safe_VkDescriptorSetAllocateInfo *local_pAllocateInfo = NULL; 484 { 485 std::lock_guard<std::mutex> lock(dispatch_lock); 486 if (pAllocateInfo) { 487 local_pAllocateInfo = new safe_VkDescriptorSetAllocateInfo(pAllocateInfo); 488 if (pAllocateInfo->descriptorPool) { 489 local_pAllocateInfo->descriptorPool = layer_data->Unwrap(pAllocateInfo->descriptorPool); 490 } 491 if (local_pAllocateInfo->pSetLayouts) { 492 for (uint32_t index1 = 0; index1 < local_pAllocateInfo->descriptorSetCount; ++index1) { 493 local_pAllocateInfo->pSetLayouts[index1] = layer_data->Unwrap(local_pAllocateInfo->pSetLayouts[index1]); 494 } 495 } 496 } 497 } 498 VkResult result = layer_data->device_dispatch_table.AllocateDescriptorSets( 499 device, (const VkDescriptorSetAllocateInfo *)local_pAllocateInfo, pDescriptorSets); 500 if (local_pAllocateInfo) { 501 delete local_pAllocateInfo; 502 } 503 if (VK_SUCCESS == result) { 504 std::lock_guard<std::mutex> lock(dispatch_lock); 505 auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[pAllocateInfo->descriptorPool]; 506 for (uint32_t index0 = 0; index0 < pAllocateInfo->descriptorSetCount; index0++) { 507 pDescriptorSets[index0] = layer_data->WrapNew(pDescriptorSets[index0]); 508 pool_descriptor_sets.insert(pDescriptorSets[index0]); 509 } 510 } 511 return result; 512} 513 514VkResult DispatchFreeDescriptorSets(ValidationObject *layer_data, VkDevice device, VkDescriptorPool descriptorPool, 515 uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets) { 516 if (!wrap_handles) 517 return layer_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets); 518 VkDescriptorSet *local_pDescriptorSets = NULL; 519 VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE; 520 { 521 std::lock_guard<std::mutex> lock(dispatch_lock); 522 local_descriptor_pool = layer_data->Unwrap(descriptorPool); 523 if (pDescriptorSets) { 524 local_pDescriptorSets = new VkDescriptorSet[descriptorSetCount]; 525 for (uint32_t index0 = 0; index0 < descriptorSetCount; ++index0) { 526 local_pDescriptorSets[index0] = layer_data->Unwrap(pDescriptorSets[index0]); 527 } 528 } 529 } 530 VkResult result = layer_data->device_dispatch_table.FreeDescriptorSets(device, local_descriptor_pool, descriptorSetCount, 531 (const VkDescriptorSet *)local_pDescriptorSets); 532 if (local_pDescriptorSets) delete[] local_pDescriptorSets; 533 if ((VK_SUCCESS == result) && (pDescriptorSets)) { 534 std::unique_lock<std::mutex> lock(dispatch_lock); 535 auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[descriptorPool]; 536 for (uint32_t index0 = 0; index0 < descriptorSetCount; index0++) { 537 VkDescriptorSet handle = pDescriptorSets[index0]; 538 pool_descriptor_sets.erase(handle); 539 uint64_t unique_id = reinterpret_cast<uint64_t &>(handle); 540 unique_id_mapping.erase(unique_id); 541 } 542 } 543 return result; 544} 545 546 547// This is the core version of this routine. The extension version is below. 548VkResult DispatchCreateDescriptorUpdateTemplate(ValidationObject *layer_data, 549 VkDevice device, 550 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, 551 const VkAllocationCallbacks *pAllocator, 552 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { 553 if (!wrap_handles) return layer_data->device_dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, 554 pDescriptorUpdateTemplate); 555 safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = NULL; 556 { 557 std::lock_guard<std::mutex> lock(dispatch_lock); 558 if (pCreateInfo) { 559 local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo); 560 if (pCreateInfo->descriptorSetLayout) { 561 local_create_info->descriptorSetLayout = layer_data->Unwrap(pCreateInfo->descriptorSetLayout); 562 } 563 if (pCreateInfo->pipelineLayout) { 564 local_create_info->pipelineLayout = layer_data->Unwrap(pCreateInfo->pipelineLayout); 565 } 566 } 567 } 568 VkResult result = layer_data->device_dispatch_table.CreateDescriptorUpdateTemplate(device, local_create_info->ptr(), pAllocator, 569 pDescriptorUpdateTemplate); 570 if (VK_SUCCESS == result) { 571 std::lock_guard<std::mutex> lock(dispatch_lock); 572 *pDescriptorUpdateTemplate = layer_data->WrapNew(*pDescriptorUpdateTemplate); 573 574 // Shadow template createInfo for later updates 575 std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info)); 576 layer_data->desc_template_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state); 577 } 578 return result; 579} 580 581// This is the extension version of this routine. The core version is above. 582VkResult DispatchCreateDescriptorUpdateTemplateKHR(ValidationObject *layer_data, 583 VkDevice device, 584 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, 585 const VkAllocationCallbacks *pAllocator, 586 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { 587 if (!wrap_handles) return layer_data->device_dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, 588 pDescriptorUpdateTemplate); 589 safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = NULL; 590 { 591 std::lock_guard<std::mutex> lock(dispatch_lock); 592 if (pCreateInfo) { 593 local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo); 594 if (pCreateInfo->descriptorSetLayout) { 595 local_create_info->descriptorSetLayout = layer_data->Unwrap(pCreateInfo->descriptorSetLayout); 596 } 597 if (pCreateInfo->pipelineLayout) { 598 local_create_info->pipelineLayout = layer_data->Unwrap(pCreateInfo->pipelineLayout); 599 } 600 } 601 } 602 VkResult result = layer_data->device_dispatch_table.CreateDescriptorUpdateTemplateKHR(device, local_create_info->ptr(), pAllocator, 603 pDescriptorUpdateTemplate); 604 if (VK_SUCCESS == result) { 605 std::lock_guard<std::mutex> lock(dispatch_lock); 606 *pDescriptorUpdateTemplate = layer_data->WrapNew(*pDescriptorUpdateTemplate); 607 608 // Shadow template createInfo for later updates 609 std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info)); 610 layer_data->desc_template_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state); 611 } 612 return result; 613} 614 615// This is the core version of this routine. The extension version is below. 616void DispatchDestroyDescriptorUpdateTemplate(ValidationObject *layer_data, 617 VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, 618 const VkAllocationCallbacks *pAllocator) { 619 if (!wrap_handles) return layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator); 620 std::unique_lock<std::mutex> lock(dispatch_lock); 621 uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate); 622 layer_data->desc_template_map.erase(descriptor_update_template_id); 623 descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)unique_id_mapping[descriptor_update_template_id]; 624 unique_id_mapping.erase(descriptor_update_template_id); 625 lock.unlock(); 626 layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator); 627} 628 629// This is the extension version of this routine. The core version is above. 630void DispatchDestroyDescriptorUpdateTemplateKHR(ValidationObject *layer_data, 631 VkDevice device, 632 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, 633 const VkAllocationCallbacks *pAllocator) { 634 if (!wrap_handles) return layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator); 635 std::unique_lock<std::mutex> lock(dispatch_lock); 636 uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate); 637 layer_data->desc_template_map.erase(descriptor_update_template_id); 638 descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)unique_id_mapping[descriptor_update_template_id]; 639 unique_id_mapping.erase(descriptor_update_template_id); 640 lock.unlock(); 641 layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator); 642} 643 644void *BuildUnwrappedUpdateTemplateBuffer(ValidationObject *layer_data, uint64_t descriptorUpdateTemplate, const void *pData) { 645 auto const template_map_entry = layer_data->desc_template_map.find(descriptorUpdateTemplate); 646 if (template_map_entry == layer_data->desc_template_map.end()) { 647 assert(0); 648 } 649 auto const &create_info = template_map_entry->second->create_info; 650 size_t allocation_size = 0; 651 std::vector<std::tuple<size_t, VulkanObjectType, void *, size_t>> template_entries; 652 653 for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) { 654 for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) { 655 size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride; 656 char *update_entry = (char *)(pData) + offset; 657 658 switch (create_info.pDescriptorUpdateEntries[i].descriptorType) { 659 case VK_DESCRIPTOR_TYPE_SAMPLER: 660 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 661 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: 662 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: 663 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: { 664 auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry); 665 allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo)); 666 667 VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry); 668 wrapped_entry->sampler = layer_data->Unwrap(image_entry->sampler); 669 wrapped_entry->imageView = layer_data->Unwrap(image_entry->imageView); 670 template_entries.emplace_back(offset, kVulkanObjectTypeImage, reinterpret_cast<void *>(wrapped_entry), 0); 671 } break; 672 673 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 674 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 675 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 676 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { 677 auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry); 678 allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo)); 679 680 VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry); 681 wrapped_entry->buffer = layer_data->Unwrap(buffer_entry->buffer); 682 template_entries.emplace_back(offset, kVulkanObjectTypeBuffer, reinterpret_cast<void *>(wrapped_entry), 0); 683 } break; 684 685 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 686 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { 687 auto buffer_view_handle = reinterpret_cast<VkBufferView *>(update_entry); 688 allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView)); 689 690 VkBufferView wrapped_entry = layer_data->Unwrap(*buffer_view_handle); 691 template_entries.emplace_back(offset, kVulkanObjectTypeBufferView, reinterpret_cast<void *>(wrapped_entry), 0); 692 } break; 693 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: { 694 size_t numBytes = create_info.pDescriptorUpdateEntries[i].descriptorCount; 695 allocation_size = std::max(allocation_size, offset + numBytes); 696 // nothing to unwrap, just plain data 697 template_entries.emplace_back(offset, kVulkanObjectTypeUnknown, reinterpret_cast<void *>(update_entry), 698 numBytes); 699 // to break out of the loop 700 j = create_info.pDescriptorUpdateEntries[i].descriptorCount; 701 } break; 702 default: 703 assert(0); 704 break; 705 } 706 } 707 } 708 // Allocate required buffer size and populate with source/unwrapped data 709 void *unwrapped_data = malloc(allocation_size); 710 for (auto &this_entry : template_entries) { 711 VulkanObjectType type = std::get<1>(this_entry); 712 void *destination = (char *)unwrapped_data + std::get<0>(this_entry); 713 void *source = (char *)std::get<2>(this_entry); 714 size_t size = std::get<3>(this_entry); 715 716 if (size != 0) { 717 assert(type == kVulkanObjectTypeUnknown); 718 memcpy(destination, source, size); 719 } else { 720 switch (type) { 721 case kVulkanObjectTypeImage: 722 *(reinterpret_cast<VkDescriptorImageInfo *>(destination)) = 723 *(reinterpret_cast<VkDescriptorImageInfo *>(source)); 724 delete reinterpret_cast<VkDescriptorImageInfo *>(source); 725 break; 726 case kVulkanObjectTypeBuffer: 727 *(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) = 728 *(reinterpret_cast<VkDescriptorBufferInfo *>(source)); 729 delete reinterpret_cast<VkDescriptorBufferInfo *>(source); 730 break; 731 case kVulkanObjectTypeBufferView: 732 *(reinterpret_cast<VkBufferView *>(destination)) = reinterpret_cast<VkBufferView>(source); 733 break; 734 default: 735 assert(0); 736 break; 737 } 738 } 739 } 740 return (void *)unwrapped_data; 741} 742 743void DispatchUpdateDescriptorSetWithTemplate(ValidationObject *layer_data, 744 VkDevice device, VkDescriptorSet descriptorSet, 745 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, 746 const void *pData) { 747 if (!wrap_handles) return layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData); 748 uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate); 749 { 750 std::lock_guard<std::mutex> lock(dispatch_lock); 751 descriptorSet = layer_data->Unwrap(descriptorSet); 752 descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)unique_id_mapping[template_handle]; 753 } 754 void *unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData); 755 layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer); 756 free(unwrapped_buffer); 757} 758 759void DispatchUpdateDescriptorSetWithTemplateKHR(ValidationObject *layer_data, 760 VkDevice device, VkDescriptorSet descriptorSet, 761 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, 762 const void *pData) { 763 if (!wrap_handles) return layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData); 764 uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate); 765 void *unwrapped_buffer = nullptr; 766 { 767 std::lock_guard<std::mutex> lock(dispatch_lock); 768 descriptorSet = layer_data->Unwrap(descriptorSet); 769 descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)unique_id_mapping[template_handle]; 770 unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData); 771 } 772 layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer); 773 free(unwrapped_buffer); 774} 775 776void DispatchCmdPushDescriptorSetWithTemplateKHR(ValidationObject *layer_data, 777 VkCommandBuffer commandBuffer, 778 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, 779 VkPipelineLayout layout, uint32_t set, const void *pData) { 780 if (!wrap_handles) return layer_data->device_dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData); 781 uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate); 782 void *unwrapped_buffer = nullptr; 783 { 784 std::lock_guard<std::mutex> lock(dispatch_lock); 785 descriptorUpdateTemplate = layer_data->Unwrap(descriptorUpdateTemplate); 786 layout = layer_data->Unwrap(layout); 787 unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData); 788 } 789 layer_data->device_dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, 790 unwrapped_buffer); 791 free(unwrapped_buffer); 792} 793 794VkResult DispatchGetPhysicalDeviceDisplayPropertiesKHR(ValidationObject *layer_data, 795 VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, 796 VkDisplayPropertiesKHR *pProperties) { 797 VkResult result = 798 layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties); 799 if (!wrap_handles) return result; 800 if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { 801 std::lock_guard<std::mutex> lock(dispatch_lock); 802 for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { 803 pProperties[idx0].display = layer_data->MaybeWrapDisplay(pProperties[idx0].display, layer_data); 804 } 805 } 806 return result; 807} 808 809VkResult DispatchGetPhysicalDeviceDisplayProperties2KHR(ValidationObject *layer_data, 810 VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, 811 VkDisplayProperties2KHR *pProperties) { 812 VkResult result = 813 layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties); 814 if (!wrap_handles) return result; 815 if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { 816 std::lock_guard<std::mutex> lock(dispatch_lock); 817 for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { 818 pProperties[idx0].displayProperties.display = 819 layer_data->MaybeWrapDisplay(pProperties[idx0].displayProperties.display, layer_data); 820 } 821 } 822 return result; 823} 824 825VkResult DispatchGetPhysicalDeviceDisplayPlanePropertiesKHR(ValidationObject *layer_data, 826 VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, 827 VkDisplayPlanePropertiesKHR *pProperties) { 828 829 VkResult result = 830 layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties); 831 if (!wrap_handles) return result; 832 if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { 833 std::lock_guard<std::mutex> lock(dispatch_lock); 834 for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { 835 VkDisplayKHR &opt_display = pProperties[idx0].currentDisplay; 836 if (opt_display) opt_display = layer_data->MaybeWrapDisplay(opt_display, layer_data); 837 } 838 } 839 return result; 840} 841 842VkResult DispatchGetPhysicalDeviceDisplayPlaneProperties2KHR(ValidationObject *layer_data,VkPhysicalDevice physicalDevice, 843 uint32_t *pPropertyCount, 844 VkDisplayPlaneProperties2KHR *pProperties) { 845 846 VkResult result = 847 layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, pPropertyCount, pProperties); 848 if (!wrap_handles) return result; 849 if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { 850 std::lock_guard<std::mutex> lock(dispatch_lock); 851 for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { 852 VkDisplayKHR &opt_display = pProperties[idx0].displayPlaneProperties.currentDisplay; 853 if (opt_display) opt_display = layer_data->MaybeWrapDisplay(opt_display, layer_data); 854 } 855 } 856 return result; 857} 858 859VkResult DispatchGetDisplayPlaneSupportedDisplaysKHR(ValidationObject *layer_data, 860 VkPhysicalDevice physicalDevice, uint32_t planeIndex, 861 uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) { 862 VkResult result = 863 layer_data->instance_dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays); 864 if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pDisplays) { 865 if (!wrap_handles) return result; 866 std::lock_guard<std::mutex> lock(dispatch_lock); 867 for (uint32_t i = 0; i < *pDisplayCount; ++i) { 868 if (pDisplays[i]) pDisplays[i] = layer_data->MaybeWrapDisplay(pDisplays[i], layer_data); 869 } 870 } 871 return result; 872} 873 874VkResult DispatchGetDisplayModePropertiesKHR(ValidationObject *layer_data, 875 VkPhysicalDevice physicalDevice, VkDisplayKHR display, 876 uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) { 877 if (!wrap_handles) return layer_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties); 878 { 879 std::lock_guard<std::mutex> lock(dispatch_lock); 880 display = layer_data->Unwrap(display); 881 } 882 883 VkResult result = layer_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties); 884 if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { 885 std::lock_guard<std::mutex> lock(dispatch_lock); 886 for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { 887 pProperties[idx0].displayMode = layer_data->WrapNew(pProperties[idx0].displayMode); 888 } 889 } 890 return result; 891} 892 893VkResult DispatchGetDisplayModeProperties2KHR(ValidationObject *layer_data, 894 VkPhysicalDevice physicalDevice, VkDisplayKHR display, 895 uint32_t *pPropertyCount, VkDisplayModeProperties2KHR *pProperties) { 896 if (!wrap_handles) return layer_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties); 897 { 898 std::lock_guard<std::mutex> lock(dispatch_lock); 899 display = layer_data->Unwrap(display); 900 } 901 902 VkResult result = 903 layer_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties); 904 if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { 905 std::lock_guard<std::mutex> lock(dispatch_lock); 906 for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { 907 pProperties[idx0].displayModeProperties.displayMode = layer_data->WrapNew(pProperties[idx0].displayModeProperties.displayMode); 908 } 909 } 910 return result; 911} 912 913VkResult DispatchDebugMarkerSetObjectTagEXT(ValidationObject *layer_data, 914 VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo) { 915 if (!wrap_handles) return layer_data->device_dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo); 916 safe_VkDebugMarkerObjectTagInfoEXT local_tag_info(pTagInfo); 917 { 918 std::lock_guard<std::mutex> lock(dispatch_lock); 919 auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info.object)); 920 if (it != unique_id_mapping.end()) { 921 local_tag_info.object = it->second; 922 } 923 } 924 VkResult result = layer_data->device_dispatch_table.DebugMarkerSetObjectTagEXT(device, 925 reinterpret_cast<VkDebugMarkerObjectTagInfoEXT *>(&local_tag_info)); 926 return result; 927} 928 929VkResult DispatchDebugMarkerSetObjectNameEXT(ValidationObject *layer_data, 930 VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) { 931 if (!wrap_handles) return layer_data->device_dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo); 932 safe_VkDebugMarkerObjectNameInfoEXT local_name_info(pNameInfo); 933 { 934 std::lock_guard<std::mutex> lock(dispatch_lock); 935 auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info.object)); 936 if (it != unique_id_mapping.end()) { 937 local_name_info.object = it->second; 938 } 939 } 940 VkResult result = layer_data->device_dispatch_table.DebugMarkerSetObjectNameEXT( 941 device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT *>(&local_name_info)); 942 return result; 943} 944 945// VK_EXT_debug_utils 946VkResult DispatchSetDebugUtilsObjectTagEXT(ValidationObject *layer_data, 947 VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) { 948 if (!wrap_handles) return layer_data->device_dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo); 949 safe_VkDebugUtilsObjectTagInfoEXT local_tag_info(pTagInfo); 950 { 951 std::lock_guard<std::mutex> lock(dispatch_lock); 952 auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info.objectHandle)); 953 if (it != unique_id_mapping.end()) { 954 local_tag_info.objectHandle = it->second; 955 } 956 } 957 VkResult result = layer_data->device_dispatch_table.SetDebugUtilsObjectTagEXT( 958 device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT *>(&local_tag_info)); 959 return result; 960} 961 962VkResult DispatchSetDebugUtilsObjectNameEXT(ValidationObject *layer_data, 963 VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) { 964 if (!wrap_handles) return layer_data->device_dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo); 965 safe_VkDebugUtilsObjectNameInfoEXT local_name_info(pNameInfo); 966 { 967 std::lock_guard<std::mutex> lock(dispatch_lock); 968 auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info.objectHandle)); 969 if (it != unique_id_mapping.end()) { 970 local_name_info.objectHandle = it->second; 971 } 972 } 973 VkResult result = layer_data->device_dispatch_table.SetDebugUtilsObjectNameEXT( 974 device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT *>(&local_name_info)); 975 return result; 976} 977 978""" 979 # Separate generated text for source and headers 980 ALL_SECTIONS = ['source_file', 'header_file'] 981 def __init__(self, 982 errFile = sys.stderr, 983 warnFile = sys.stderr, 984 diagFile = sys.stdout): 985 OutputGenerator.__init__(self, errFile, warnFile, diagFile) 986 self.INDENT_SPACES = 4 987 self.instance_extensions = [] 988 self.device_extensions = [] 989 # Commands which are not autogenerated but still intercepted 990 self.no_autogen_list = [ 991 'vkCreateInstance', 992 'vkDestroyInstance', 993 'vkCreateDevice', 994 'vkDestroyDevice', 995 'vkCreateComputePipelines', 996 'vkCreateGraphicsPipelines', 997 'vkCreateSwapchainKHR', 998 'vkCreateSharedSwapchainsKHR', 999 'vkGetSwapchainImagesKHR', 1000 'vkDestroySwapchainKHR', 1001 'vkQueuePresentKHR', 1002 'vkResetDescriptorPool', 1003 'vkDestroyDescriptorPool', 1004 'vkAllocateDescriptorSets', 1005 'vkFreeDescriptorSets', 1006 'vkCreateDescriptorUpdateTemplate', 1007 'vkCreateDescriptorUpdateTemplateKHR', 1008 'vkDestroyDescriptorUpdateTemplate', 1009 'vkDestroyDescriptorUpdateTemplateKHR', 1010 'vkUpdateDescriptorSetWithTemplate', 1011 'vkUpdateDescriptorSetWithTemplateKHR', 1012 'vkCmdPushDescriptorSetWithTemplateKHR', 1013 'vkDebugMarkerSetObjectTagEXT', 1014 'vkDebugMarkerSetObjectNameEXT', 1015 'vkCreateRenderPass', 1016 'vkCreateRenderPass2KHR', 1017 'vkDestroyRenderPass', 1018 'vkSetDebugUtilsObjectNameEXT', 1019 'vkSetDebugUtilsObjectTagEXT', 1020 'vkGetPhysicalDeviceDisplayPropertiesKHR', 1021 'vkGetPhysicalDeviceDisplayProperties2KHR', 1022 'vkGetPhysicalDeviceDisplayPlanePropertiesKHR', 1023 'vkGetPhysicalDeviceDisplayPlaneProperties2KHR', 1024 'vkGetDisplayPlaneSupportedDisplaysKHR', 1025 'vkGetDisplayModePropertiesKHR', 1026 'vkGetDisplayModeProperties2KHR', 1027 'vkEnumerateInstanceExtensionProperties', 1028 'vkEnumerateInstanceLayerProperties', 1029 'vkEnumerateDeviceExtensionProperties', 1030 'vkEnumerateDeviceLayerProperties', 1031 'vkEnumerateInstanceVersion', 1032 ] 1033 self.headerVersion = None 1034 # Internal state - accumulators for different inner block text 1035 self.sections = dict([(section, []) for section in self.ALL_SECTIONS]) 1036 1037 self.cmdMembers = [] 1038 self.cmd_feature_protect = [] # Save ifdef's for each command 1039 self.cmd_info_data = [] # Save the cmdinfo data for wrapping the handles when processing is complete 1040 self.structMembers = [] # List of StructMemberData records for all Vulkan structs 1041 self.extension_structs = [] # List of all structs or sister-structs containing handles 1042 # A sister-struct may contain no handles but shares a structextends attribute with one that does 1043 self.pnext_extension_structs = [] # List of all structs which can be extended by a pnext chain 1044 self.structTypes = dict() # Map of Vulkan struct typename to required VkStructureType 1045 self.struct_member_dict = dict() 1046 # Named tuples to store struct and command data 1047 self.StructType = namedtuple('StructType', ['name', 'value']) 1048 self.CmdMemberData = namedtuple('CmdMemberData', ['name', 'members']) 1049 self.CmdInfoData = namedtuple('CmdInfoData', ['name', 'cmdinfo']) 1050 self.CmdExtraProtect = namedtuple('CmdExtraProtect', ['name', 'extra_protect']) 1051 1052 self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'ispointer', 'isconst', 'iscount', 'len', 'extstructs', 'cdecl', 'islocal', 'iscreate', 'isdestroy', 'feature_protect']) 1053 self.StructMemberData = namedtuple('StructMemberData', ['name', 'members']) 1054 # 1055 def incIndent(self, indent): 1056 inc = ' ' * self.INDENT_SPACES 1057 if indent: 1058 return indent + inc 1059 return inc 1060 # 1061 def decIndent(self, indent): 1062 if indent and (len(indent) > self.INDENT_SPACES): 1063 return indent[:-self.INDENT_SPACES] 1064 return '' 1065 # 1066 # Override makeProtoName to drop the "vk" prefix 1067 def makeProtoName(self, name, tail): 1068 return self.genOpts.apientry + name[2:] + tail 1069 # 1070 # Check if the parameter passed in is a pointer to an array 1071 def paramIsArray(self, param): 1072 return param.attrib.get('len') is not None 1073 # 1074 def beginFile(self, genOpts): 1075 OutputGenerator.beginFile(self, genOpts) 1076 self.appendSection('header_file', self.inline_copyright_message) 1077 # Multiple inclusion protection & C++ namespace. 1078 self.header = False 1079 if (self.genOpts.filename and 'h' == self.genOpts.filename[-1]): 1080 self.header = True 1081 self.appendSection('header_file', '#pragma once') 1082 self.appendSection('header_file', '') 1083 self.appendSection('header_file', '#if defined(LAYER_CHASSIS_CAN_WRAP_HANDLES)') 1084 self.appendSection('header_file', 'extern bool wrap_handles;') 1085 self.appendSection('header_file', '#else') 1086 self.appendSection('header_file', 'extern const bool wrap_handles;') 1087 self.appendSection('header_file', '#endif') 1088 1089 # Now that the data is all collected and complete, generate and output the wrapping/unwrapping routines 1090 def endFile(self): 1091 self.struct_member_dict = dict(self.structMembers) 1092 # Generate the list of APIs that might need to handle wrapped extension structs 1093 self.GenerateCommandWrapExtensionList() 1094 # Write out wrapping/unwrapping functions 1095 self.WrapCommands() 1096 # Build and write out pNext processing function 1097 extension_proc = self.build_extension_processing_func() 1098 1099 if not self.header: 1100 write(self.inline_copyright_message, file=self.outFile) 1101 self.newline() 1102 write('#include <mutex>', file=self.outFile) 1103 write('#include "chassis.h"', file=self.outFile) 1104 write('#include "layer_chassis_dispatch.h"', file=self.outFile) 1105 self.newline() 1106 write('// This intentionally includes a cpp file', file=self.outFile) 1107 write('#include "vk_safe_struct.cpp"', file=self.outFile) 1108 self.newline() 1109 write('std::mutex dispatch_lock;', file=self.outFile) 1110 self.newline() 1111 write('// Unique Objects pNext extension handling function', file=self.outFile) 1112 write('%s' % extension_proc, file=self.outFile) 1113 self.newline() 1114 write('// Manually written Dispatch routines', file=self.outFile) 1115 write('%s' % self.inline_custom_source_preamble, file=self.outFile) 1116 self.newline() 1117 if (self.sections['source_file']): 1118 write('\n'.join(self.sections['source_file']), end=u'', file=self.outFile) 1119 else: 1120 self.newline() 1121 if (self.sections['header_file']): 1122 write('\n'.join(self.sections['header_file']), end=u'', file=self.outFile) 1123 1124 # Finish processing in superclass 1125 OutputGenerator.endFile(self) 1126 # 1127 def beginFeature(self, interface, emit): 1128 # Start processing in superclass 1129 OutputGenerator.beginFeature(self, interface, emit) 1130 self.headerVersion = None 1131 self.featureExtraProtect = GetFeatureProtect(interface) 1132 if self.featureName != 'VK_VERSION_1_0' and self.featureName != 'VK_VERSION_1_1': 1133 white_list_entry = [] 1134 if (self.featureExtraProtect is not None): 1135 white_list_entry += [ '#ifdef %s' % self.featureExtraProtect ] 1136 white_list_entry += [ '"%s"' % self.featureName ] 1137 if (self.featureExtraProtect is not None): 1138 white_list_entry += [ '#endif' ] 1139 featureType = interface.get('type') 1140 if featureType == 'instance': 1141 self.instance_extensions += white_list_entry 1142 elif featureType == 'device': 1143 self.device_extensions += white_list_entry 1144 # 1145 def endFeature(self): 1146 # Finish processing in superclass 1147 OutputGenerator.endFeature(self) 1148 # 1149 def genType(self, typeinfo, name, alias): 1150 OutputGenerator.genType(self, typeinfo, name, alias) 1151 typeElem = typeinfo.elem 1152 # If the type is a struct type, traverse the imbedded <member> tags generating a structure. 1153 # Otherwise, emit the tag text. 1154 category = typeElem.get('category') 1155 if (category == 'struct' or category == 'union'): 1156 self.genStruct(typeinfo, name, alias) 1157 # 1158 # Append a definition to the specified section 1159 def appendSection(self, section, text): 1160 # self.sections[section].append('SECTION: ' + section + '\n') 1161 self.sections[section].append(text) 1162 # 1163 # Check if the parameter passed in is a pointer 1164 def paramIsPointer(self, param): 1165 ispointer = False 1166 for elem in param: 1167 if ((elem.tag is not 'type') and (elem.tail is not None)) and '*' in elem.tail: 1168 ispointer = True 1169 return ispointer 1170 # 1171 # Get the category of a type 1172 def getTypeCategory(self, typename): 1173 types = self.registry.tree.findall("types/type") 1174 for elem in types: 1175 if (elem.find("name") is not None and elem.find('name').text == typename) or elem.attrib.get('name') == typename: 1176 return elem.attrib.get('category') 1177 # 1178 # Check if a parent object is dispatchable or not 1179 def isHandleTypeNonDispatchable(self, handletype): 1180 handle = self.registry.tree.find("types/type/[name='" + handletype + "'][@category='handle']") 1181 if handle is not None and handle.find('type').text == 'VK_DEFINE_NON_DISPATCHABLE_HANDLE': 1182 return True 1183 else: 1184 return False 1185 # 1186 # Retrieve the type and name for a parameter 1187 def getTypeNameTuple(self, param): 1188 type = '' 1189 name = '' 1190 for elem in param: 1191 if elem.tag == 'type': 1192 type = noneStr(elem.text) 1193 elif elem.tag == 'name': 1194 name = noneStr(elem.text) 1195 return (type, name) 1196 # 1197 # Retrieve the value of the len tag 1198 def getLen(self, param): 1199 result = None 1200 len = param.attrib.get('len') 1201 if len and len != 'null-terminated': 1202 # For string arrays, 'len' can look like 'count,null-terminated', indicating that we 1203 # have a null terminated array of strings. We strip the null-terminated from the 1204 # 'len' field and only return the parameter specifying the string count 1205 if 'null-terminated' in len: 1206 result = len.split(',')[0] 1207 else: 1208 result = len 1209 # Spec has now notation for len attributes, using :: instead of platform specific pointer symbol 1210 result = str(result).replace('::', '->') 1211 return result 1212 # 1213 # Generate a VkStructureType based on a structure typename 1214 def genVkStructureType(self, typename): 1215 # Add underscore between lowercase then uppercase 1216 value = re.sub('([a-z0-9])([A-Z])', r'\1_\2', typename) 1217 # Change to uppercase 1218 value = value.upper() 1219 # Add STRUCTURE_TYPE_ 1220 return re.sub('VK_', 'VK_STRUCTURE_TYPE_', value) 1221 # 1222 # Struct parameter check generation. 1223 # This is a special case of the <type> tag where the contents are interpreted as a set of 1224 # <member> tags instead of freeform C type declarations. The <member> tags are just like 1225 # <param> tags - they are a declaration of a struct or union member. Only simple member 1226 # declarations are supported (no nested structs etc.) 1227 def genStruct(self, typeinfo, typeName, alias): 1228 OutputGenerator.genStruct(self, typeinfo, typeName, alias) 1229 members = typeinfo.elem.findall('.//member') 1230 # Iterate over members once to get length parameters for arrays 1231 lens = set() 1232 for member in members: 1233 len = self.getLen(member) 1234 if len: 1235 lens.add(len) 1236 # Generate member info 1237 membersInfo = [] 1238 for member in members: 1239 # Get the member's type and name 1240 info = self.getTypeNameTuple(member) 1241 type = info[0] 1242 name = info[1] 1243 cdecl = self.makeCParamDecl(member, 0) 1244 # Process VkStructureType 1245 if type == 'VkStructureType': 1246 # Extract the required struct type value from the comments 1247 # embedded in the original text defining the 'typeinfo' element 1248 rawXml = etree.tostring(typeinfo.elem).decode('ascii') 1249 result = re.search(r'VK_STRUCTURE_TYPE_\w+', rawXml) 1250 if result: 1251 value = result.group(0) 1252 else: 1253 value = self.genVkStructureType(typeName) 1254 # Store the required type value 1255 self.structTypes[typeName] = self.StructType(name=name, value=value) 1256 # Store pointer/array/string info 1257 extstructs = self.registry.validextensionstructs[typeName] if name == 'pNext' else None 1258 membersInfo.append(self.CommandParam(type=type, 1259 name=name, 1260 ispointer=self.paramIsPointer(member), 1261 isconst=True if 'const' in cdecl else False, 1262 iscount=True if name in lens else False, 1263 len=self.getLen(member), 1264 extstructs=extstructs, 1265 cdecl=cdecl, 1266 islocal=False, 1267 iscreate=False, 1268 isdestroy=False, 1269 feature_protect=self.featureExtraProtect)) 1270 self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo)) 1271 1272 # 1273 # Insert a lock_guard line 1274 def lock_guard(self, indent): 1275 return '%sstd::lock_guard<std::mutex> lock(dispatch_lock);\n' % indent 1276 # 1277 # Determine if a struct has an NDO as a member or an embedded member 1278 def struct_contains_ndo(self, struct_item): 1279 struct_member_dict = dict(self.structMembers) 1280 struct_members = struct_member_dict[struct_item] 1281 1282 for member in struct_members: 1283 if self.isHandleTypeNonDispatchable(member.type): 1284 return True 1285 elif member.type in struct_member_dict: 1286 if self.struct_contains_ndo(member.type) == True: 1287 return True 1288 return False 1289 # 1290 # Return list of struct members which contain, or which sub-structures contain 1291 # an NDO in a given list of parameters or members 1292 def getParmeterStructsWithNdos(self, item_list): 1293 struct_list = set() 1294 for item in item_list: 1295 paramtype = item.find('type') 1296 typecategory = self.getTypeCategory(paramtype.text) 1297 if typecategory == 'struct': 1298 if self.struct_contains_ndo(paramtype.text) == True: 1299 struct_list.add(item) 1300 return struct_list 1301 # 1302 # Return list of non-dispatchable objects from a given list of parameters or members 1303 def getNdosInParameterList(self, item_list, create_func): 1304 ndo_list = set() 1305 if create_func == True: 1306 member_list = item_list[0:-1] 1307 else: 1308 member_list = item_list 1309 for item in member_list: 1310 if self.isHandleTypeNonDispatchable(paramtype.text): 1311 ndo_list.add(item) 1312 return ndo_list 1313 # 1314 # Construct list of extension structs containing handles, or extension structs that share a structextends attribute 1315 # WITH an extension struct containing handles. All extension structs in any pNext chain will have to be copied. 1316 # TODO: make this recursive -- structs buried three or more levels deep are not searched for extensions 1317 def GenerateCommandWrapExtensionList(self): 1318 for struct in self.structMembers: 1319 if (len(struct.members) > 1) and struct.members[1].extstructs is not None: 1320 found = False; 1321 for item in struct.members[1].extstructs: 1322 if item != '' and item not in self.pnext_extension_structs: 1323 self.pnext_extension_structs.append(item) 1324 if item != '' and self.struct_contains_ndo(item) == True: 1325 found = True 1326 if found == True: 1327 for item in struct.members[1].extstructs: 1328 if item != '' and item not in self.extension_structs: 1329 self.extension_structs.append(item) 1330 # 1331 # Returns True if a struct may have a pNext chain containing an NDO 1332 def StructWithExtensions(self, struct_type): 1333 if struct_type in self.struct_member_dict: 1334 param_info = self.struct_member_dict[struct_type] 1335 if (len(param_info) > 1) and param_info[1].extstructs is not None: 1336 for item in param_info[1].extstructs: 1337 if item in self.extension_structs: 1338 return True 1339 return False 1340 # 1341 # Generate pNext handling function 1342 def build_extension_processing_func(self): 1343 # Construct helper functions to build and free pNext extension chains 1344 pnext_proc = '' 1345 pnext_proc += 'void *CreateUnwrappedExtensionStructs(ValidationObject *layer_data, const void *pNext) {\n' 1346 pnext_proc += ' void *cur_pnext = const_cast<void *>(pNext);\n' 1347 pnext_proc += ' void *head_pnext = NULL;\n' 1348 pnext_proc += ' void *prev_ext_struct = NULL;\n' 1349 pnext_proc += ' void *cur_ext_struct = NULL;\n\n' 1350 pnext_proc += ' while (cur_pnext != NULL) {\n' 1351 pnext_proc += ' VkBaseOutStructure *header = reinterpret_cast<VkBaseOutStructure *>(cur_pnext);\n\n' 1352 pnext_proc += ' switch (header->sType) {\n' 1353 for item in self.pnext_extension_structs: 1354 struct_info = self.struct_member_dict[item] 1355 if struct_info[0].feature_protect is not None: 1356 pnext_proc += '#ifdef %s \n' % struct_info[0].feature_protect 1357 pnext_proc += ' case %s: {\n' % self.structTypes[item].value 1358 pnext_proc += ' safe_%s *safe_struct = new safe_%s;\n' % (item, item) 1359 pnext_proc += ' safe_struct->initialize(reinterpret_cast<const %s *>(cur_pnext));\n' % item 1360 # Generate code to unwrap the handles 1361 indent = ' ' 1362 (tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, 'safe_struct->', 0, False, False, False, False) 1363 pnext_proc += tmp_pre 1364 pnext_proc += ' cur_ext_struct = reinterpret_cast<void *>(safe_struct);\n' 1365 pnext_proc += ' } break;\n' 1366 if struct_info[0].feature_protect is not None: 1367 pnext_proc += '#endif // %s \n' % struct_info[0].feature_protect 1368 pnext_proc += '\n' 1369 pnext_proc += ' default:\n' 1370 pnext_proc += ' break;\n' 1371 pnext_proc += ' }\n\n' 1372 pnext_proc += ' // Save pointer to the first structure in the pNext chain\n' 1373 pnext_proc += ' head_pnext = (head_pnext ? head_pnext : cur_ext_struct);\n\n' 1374 pnext_proc += ' // For any extension structure but the first, link the last struct\'s pNext to the current ext struct\n' 1375 pnext_proc += ' if (prev_ext_struct) {\n' 1376 pnext_proc += ' reinterpret_cast<VkBaseOutStructure *>(prev_ext_struct)->pNext = reinterpret_cast<VkBaseOutStructure *>(cur_ext_struct);\n' 1377 pnext_proc += ' }\n' 1378 pnext_proc += ' prev_ext_struct = cur_ext_struct;\n\n' 1379 pnext_proc += ' // Process the next structure in the chain\n' 1380 pnext_proc += ' cur_pnext = header->pNext;\n' 1381 pnext_proc += ' }\n' 1382 pnext_proc += ' return head_pnext;\n' 1383 pnext_proc += '}\n\n' 1384 pnext_proc += '// Free a pNext extension chain\n' 1385 pnext_proc += 'void FreeUnwrappedExtensionStructs(void *head) {\n' 1386 pnext_proc += ' VkBaseOutStructure *curr_ptr = reinterpret_cast<VkBaseOutStructure *>(head);\n' 1387 pnext_proc += ' while (curr_ptr) {\n' 1388 pnext_proc += ' VkBaseOutStructure *header = curr_ptr;\n' 1389 pnext_proc += ' curr_ptr = reinterpret_cast<VkBaseOutStructure *>(header->pNext);\n\n' 1390 pnext_proc += ' switch (header->sType) {\n'; 1391 for item in self.pnext_extension_structs: 1392 struct_info = self.struct_member_dict[item] 1393 if struct_info[0].feature_protect is not None: 1394 pnext_proc += '#ifdef %s \n' % struct_info[0].feature_protect 1395 pnext_proc += ' case %s:\n' % self.structTypes[item].value 1396 pnext_proc += ' delete reinterpret_cast<safe_%s *>(header);\n' % item 1397 pnext_proc += ' break;\n' 1398 if struct_info[0].feature_protect is not None: 1399 pnext_proc += '#endif // %s \n' % struct_info[0].feature_protect 1400 pnext_proc += '\n' 1401 pnext_proc += ' default:\n' 1402 pnext_proc += ' assert(0);\n' 1403 pnext_proc += ' }\n' 1404 pnext_proc += ' }\n' 1405 pnext_proc += '}\n' 1406 return pnext_proc 1407 1408 # 1409 # Generate source for creating a non-dispatchable object 1410 def generate_create_ndo_code(self, indent, proto, params, cmd_info): 1411 create_ndo_code = '' 1412 handle_type = params[-1].find('type') 1413 if self.isHandleTypeNonDispatchable(handle_type.text): 1414 # Check for special case where multiple handles are returned 1415 ndo_array = False 1416 if cmd_info[-1].len is not None: 1417 ndo_array = True; 1418 handle_name = params[-1].find('name') 1419 create_ndo_code += '%sif (VK_SUCCESS == result) {\n' % (indent) 1420 indent = self.incIndent(indent) 1421 create_ndo_code += '%sstd::lock_guard<std::mutex> lock(dispatch_lock);\n' % (indent) 1422 ndo_dest = '*%s' % handle_name.text 1423 if ndo_array == True: 1424 create_ndo_code += '%sfor (uint32_t index0 = 0; index0 < %s; index0++) {\n' % (indent, cmd_info[-1].len) 1425 indent = self.incIndent(indent) 1426 ndo_dest = '%s[index0]' % cmd_info[-1].name 1427 create_ndo_code += '%s%s = layer_data->WrapNew(%s);\n' % (indent, ndo_dest, ndo_dest) 1428 if ndo_array == True: 1429 indent = self.decIndent(indent) 1430 create_ndo_code += '%s}\n' % indent 1431 indent = self.decIndent(indent) 1432 create_ndo_code += '%s}\n' % (indent) 1433 return create_ndo_code 1434 # 1435 # Generate source for destroying a non-dispatchable object 1436 def generate_destroy_ndo_code(self, indent, proto, cmd_info): 1437 destroy_ndo_code = '' 1438 ndo_array = False 1439 if True in [destroy_txt in proto.text for destroy_txt in ['Destroy', 'Free']]: 1440 # Check for special case where multiple handles are returned 1441 if cmd_info[-1].len is not None: 1442 ndo_array = True; 1443 param = -1 1444 else: 1445 param = -2 1446 if self.isHandleTypeNonDispatchable(cmd_info[param].type) == True: 1447 if ndo_array == True: 1448 # This API is freeing an array of handles. Remove them from the unique_id map. 1449 destroy_ndo_code += '%sif ((VK_SUCCESS == result) && (%s)) {\n' % (indent, cmd_info[param].name) 1450 indent = self.incIndent(indent) 1451 destroy_ndo_code += '%sstd::unique_lock<std::mutex> lock(dispatch_lock);\n' % (indent) 1452 destroy_ndo_code += '%sfor (uint32_t index0 = 0; index0 < %s; index0++) {\n' % (indent, cmd_info[param].len) 1453 indent = self.incIndent(indent) 1454 destroy_ndo_code += '%s%s handle = %s[index0];\n' % (indent, cmd_info[param].type, cmd_info[param].name) 1455 destroy_ndo_code += '%suint64_t unique_id = reinterpret_cast<uint64_t &>(handle);\n' % (indent) 1456 destroy_ndo_code += '%sunique_id_mapping.erase(unique_id);\n' % (indent) 1457 indent = self.decIndent(indent); 1458 destroy_ndo_code += '%s}\n' % indent 1459 indent = self.decIndent(indent); 1460 destroy_ndo_code += '%s}\n' % indent 1461 else: 1462 # Remove a single handle from the map 1463 destroy_ndo_code += '%sstd::unique_lock<std::mutex> lock(dispatch_lock);\n' % (indent) 1464 destroy_ndo_code += '%suint64_t %s_id = reinterpret_cast<uint64_t &>(%s);\n' % (indent, cmd_info[param].name, cmd_info[param].name) 1465 destroy_ndo_code += '%s%s = (%s)unique_id_mapping[%s_id];\n' % (indent, cmd_info[param].name, cmd_info[param].type, cmd_info[param].name) 1466 destroy_ndo_code += '%sunique_id_mapping.erase(%s_id);\n' % (indent, cmd_info[param].name) 1467 destroy_ndo_code += '%slock.unlock();\n' % (indent) 1468 return ndo_array, destroy_ndo_code 1469 1470 # 1471 # Clean up local declarations 1472 def cleanUpLocalDeclarations(self, indent, prefix, name, len, index, process_pnext): 1473 cleanup = '%sif (local_%s%s) {\n' % (indent, prefix, name) 1474 if len is not None: 1475 if process_pnext: 1476 cleanup += '%s for (uint32_t %s = 0; %s < %s%s; ++%s) {\n' % (indent, index, index, prefix, len, index) 1477 cleanup += '%s FreeUnwrappedExtensionStructs(const_cast<void *>(local_%s%s[%s].pNext));\n' % (indent, prefix, name, index) 1478 cleanup += '%s }\n' % indent 1479 cleanup += '%s delete[] local_%s%s;\n' % (indent, prefix, name) 1480 else: 1481 if process_pnext: 1482 cleanup += '%s FreeUnwrappedExtensionStructs(const_cast<void *>(local_%s%s->pNext));\n' % (indent, prefix, name) 1483 cleanup += '%s delete local_%s%s;\n' % (indent, prefix, name) 1484 cleanup += "%s}\n" % (indent) 1485 return cleanup 1486 # 1487 # Output UO code for a single NDO (ndo_count is NULL) or a counted list of NDOs 1488 def outputNDOs(self, ndo_type, ndo_name, ndo_count, prefix, index, indent, destroy_func, destroy_array, top_level): 1489 decl_code = '' 1490 pre_call_code = '' 1491 post_call_code = '' 1492 if ndo_count is not None: 1493 if top_level == True: 1494 decl_code += '%s%s *local_%s%s = NULL;\n' % (indent, ndo_type, prefix, ndo_name) 1495 pre_call_code += '%s if (%s%s) {\n' % (indent, prefix, ndo_name) 1496 indent = self.incIndent(indent) 1497 if top_level == True: 1498 pre_call_code += '%s local_%s%s = new %s[%s];\n' % (indent, prefix, ndo_name, ndo_type, ndo_count) 1499 pre_call_code += '%s for (uint32_t %s = 0; %s < %s; ++%s) {\n' % (indent, index, index, ndo_count, index) 1500 indent = self.incIndent(indent) 1501 pre_call_code += '%s local_%s%s[%s] = layer_data->Unwrap(%s[%s]);\n' % (indent, prefix, ndo_name, index, ndo_name, index) 1502 else: 1503 pre_call_code += '%s for (uint32_t %s = 0; %s < %s; ++%s) {\n' % (indent, index, index, ndo_count, index) 1504 indent = self.incIndent(indent) 1505 pre_call_code += '%s %s%s[%s] = layer_data->Unwrap(%s%s[%s]);\n' % (indent, prefix, ndo_name, index, prefix, ndo_name, index) 1506 indent = self.decIndent(indent) 1507 pre_call_code += '%s }\n' % indent 1508 indent = self.decIndent(indent) 1509 pre_call_code += '%s }\n' % indent 1510 if top_level == True: 1511 post_call_code += '%sif (local_%s%s)\n' % (indent, prefix, ndo_name) 1512 indent = self.incIndent(indent) 1513 post_call_code += '%sdelete[] local_%s;\n' % (indent, ndo_name) 1514 else: 1515 if top_level == True: 1516 if (destroy_func == False) or (destroy_array == True): 1517 pre_call_code += '%s %s = layer_data->Unwrap(%s);\n' % (indent, ndo_name, ndo_name) 1518 else: 1519 # Make temp copy of this var with the 'local' removed. It may be better to not pass in 'local_' 1520 # as part of the string and explicitly print it 1521 fix = str(prefix).strip('local_'); 1522 pre_call_code += '%s if (%s%s) {\n' % (indent, fix, ndo_name) 1523 indent = self.incIndent(indent) 1524 pre_call_code += '%s %s%s = layer_data->Unwrap(%s%s);\n' % (indent, prefix, ndo_name, fix, ndo_name) 1525 indent = self.decIndent(indent) 1526 pre_call_code += '%s }\n' % indent 1527 return decl_code, pre_call_code, post_call_code 1528 # 1529 # first_level_param indicates if elements are passed directly into the function else they're below a ptr/struct 1530 # create_func means that this is API creates or allocates NDOs 1531 # destroy_func indicates that this API destroys or frees NDOs 1532 # destroy_array means that the destroy_func operated on an array of NDOs 1533 def uniquify_members(self, members, indent, prefix, array_index, create_func, destroy_func, destroy_array, first_level_param): 1534 decls = '' 1535 pre_code = '' 1536 post_code = '' 1537 index = 'index%s' % str(array_index) 1538 array_index += 1 1539 # Process any NDOs in this structure and recurse for any sub-structs in this struct 1540 for member in members: 1541 process_pnext = self.StructWithExtensions(member.type) 1542 # Handle NDOs 1543 if self.isHandleTypeNonDispatchable(member.type) == True: 1544 count_name = member.len 1545 if (count_name is not None): 1546 if first_level_param == False: 1547 count_name = '%s%s' % (prefix, member.len) 1548 1549 if (first_level_param == False) or (create_func == False) or (not '*' in member.cdecl): 1550 (tmp_decl, tmp_pre, tmp_post) = self.outputNDOs(member.type, member.name, count_name, prefix, index, indent, destroy_func, destroy_array, first_level_param) 1551 decls += tmp_decl 1552 pre_code += tmp_pre 1553 post_code += tmp_post 1554 # Handle Structs that contain NDOs at some level 1555 elif member.type in self.struct_member_dict: 1556 # Structs at first level will have an NDO, OR, we need a safe_struct for the pnext chain 1557 if self.struct_contains_ndo(member.type) == True or process_pnext: 1558 struct_info = self.struct_member_dict[member.type] 1559 # TODO (jbolz): Can this use paramIsPointer? 1560 ispointer = '*' in member.cdecl; 1561 # Struct Array 1562 if member.len is not None: 1563 # Update struct prefix 1564 if first_level_param == True: 1565 new_prefix = 'local_%s' % member.name 1566 # Declare safe_VarType for struct 1567 decls += '%ssafe_%s *%s = NULL;\n' % (indent, member.type, new_prefix) 1568 else: 1569 new_prefix = '%s%s' % (prefix, member.name) 1570 pre_code += '%s if (%s%s) {\n' % (indent, prefix, member.name) 1571 indent = self.incIndent(indent) 1572 if first_level_param == True: 1573 pre_code += '%s %s = new safe_%s[%s];\n' % (indent, new_prefix, member.type, member.len) 1574 pre_code += '%s for (uint32_t %s = 0; %s < %s%s; ++%s) {\n' % (indent, index, index, prefix, member.len, index) 1575 indent = self.incIndent(indent) 1576 if first_level_param == True: 1577 pre_code += '%s %s[%s].initialize(&%s[%s]);\n' % (indent, new_prefix, index, member.name, index) 1578 if process_pnext: 1579 pre_code += '%s %s[%s].pNext = CreateUnwrappedExtensionStructs(layer_data, %s[%s].pNext);\n' % (indent, new_prefix, index, new_prefix, index) 1580 local_prefix = '%s[%s].' % (new_prefix, index) 1581 # Process sub-structs in this struct 1582 (tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, local_prefix, array_index, create_func, destroy_func, destroy_array, False) 1583 decls += tmp_decl 1584 pre_code += tmp_pre 1585 post_code += tmp_post 1586 indent = self.decIndent(indent) 1587 pre_code += '%s }\n' % indent 1588 indent = self.decIndent(indent) 1589 pre_code += '%s }\n' % indent 1590 if first_level_param == True: 1591 post_code += self.cleanUpLocalDeclarations(indent, prefix, member.name, member.len, index, process_pnext) 1592 # Single Struct 1593 elif ispointer: 1594 # Update struct prefix 1595 if first_level_param == True: 1596 new_prefix = 'local_%s->' % member.name 1597 decls += '%ssafe_%s *local_%s%s = NULL;\n' % (indent, member.type, prefix, member.name) 1598 else: 1599 new_prefix = '%s%s->' % (prefix, member.name) 1600 # Declare safe_VarType for struct 1601 pre_code += '%s if (%s%s) {\n' % (indent, prefix, member.name) 1602 indent = self.incIndent(indent) 1603 if first_level_param == True: 1604 pre_code += '%s local_%s%s = new safe_%s(%s);\n' % (indent, prefix, member.name, member.type, member.name) 1605 # Process sub-structs in this struct 1606 (tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, new_prefix, array_index, create_func, destroy_func, destroy_array, False) 1607 decls += tmp_decl 1608 pre_code += tmp_pre 1609 post_code += tmp_post 1610 if process_pnext: 1611 pre_code += '%s local_%s%s->pNext = CreateUnwrappedExtensionStructs(layer_data, local_%s%s->pNext);\n' % (indent, prefix, member.name, prefix, member.name) 1612 indent = self.decIndent(indent) 1613 pre_code += '%s }\n' % indent 1614 if first_level_param == True: 1615 post_code += self.cleanUpLocalDeclarations(indent, prefix, member.name, member.len, index, process_pnext) 1616 else: 1617 # Update struct prefix 1618 if first_level_param == True: 1619 sys.exit(1) 1620 else: 1621 new_prefix = '%s%s.' % (prefix, member.name) 1622 # Process sub-structs in this struct 1623 (tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, new_prefix, array_index, create_func, destroy_func, destroy_array, False) 1624 decls += tmp_decl 1625 pre_code += tmp_pre 1626 post_code += tmp_post 1627 if process_pnext: 1628 pre_code += '%s local_%s%s.pNext = CreateUnwrappedExtensionStructs(layer_data, local_%s%s.pNext);\n' % (indent, prefix, member.name, prefix, member.name) 1629 return decls, pre_code, post_code 1630 # 1631 # For a particular API, generate the non-dispatchable-object wrapping/unwrapping code 1632 def generate_wrapping_code(self, cmd): 1633 indent = ' ' 1634 proto = cmd.find('proto/name') 1635 params = cmd.findall('param') 1636 1637 if proto.text is not None: 1638 cmd_member_dict = dict(self.cmdMembers) 1639 cmd_info = cmd_member_dict[proto.text] 1640 # Handle ndo create/allocate operations 1641 if cmd_info[0].iscreate: 1642 create_ndo_code = self.generate_create_ndo_code(indent, proto, params, cmd_info) 1643 else: 1644 create_ndo_code = '' 1645 # Handle ndo destroy/free operations 1646 if cmd_info[0].isdestroy: 1647 (destroy_array, destroy_ndo_code) = self.generate_destroy_ndo_code(indent, proto, cmd_info) 1648 else: 1649 destroy_array = False 1650 destroy_ndo_code = '' 1651 paramdecl = '' 1652 param_pre_code = '' 1653 param_post_code = '' 1654 create_func = True if create_ndo_code else False 1655 destroy_func = True if destroy_ndo_code else False 1656 (paramdecl, param_pre_code, param_post_code) = self.uniquify_members(cmd_info, indent, '', 0, create_func, destroy_func, destroy_array, True) 1657 param_post_code += create_ndo_code 1658 if destroy_ndo_code: 1659 if destroy_array == True: 1660 param_post_code += destroy_ndo_code 1661 else: 1662 param_pre_code += destroy_ndo_code 1663 if param_pre_code: 1664 if (not destroy_func) or (destroy_array): 1665 param_pre_code = '%s{\n%s%s%s%s}\n' % (' ', indent, self.lock_guard(indent), param_pre_code, indent) 1666 return paramdecl, param_pre_code, param_post_code 1667 # 1668 # Capture command parameter info needed to wrap NDOs as well as handling some boilerplate code 1669 def genCmd(self, cmdinfo, cmdname, alias): 1670 1671 # Add struct-member type information to command parameter information 1672 OutputGenerator.genCmd(self, cmdinfo, cmdname, alias) 1673 members = cmdinfo.elem.findall('.//param') 1674 # Iterate over members once to get length parameters for arrays 1675 lens = set() 1676 for member in members: 1677 len = self.getLen(member) 1678 if len: 1679 lens.add(len) 1680 struct_member_dict = dict(self.structMembers) 1681 # Generate member info 1682 membersInfo = [] 1683 for member in members: 1684 # Get type and name of member 1685 info = self.getTypeNameTuple(member) 1686 type = info[0] 1687 name = info[1] 1688 cdecl = self.makeCParamDecl(member, 0) 1689 # Check for parameter name in lens set 1690 iscount = True if name in lens else False 1691 len = self.getLen(member) 1692 isconst = True if 'const' in cdecl else False 1693 ispointer = self.paramIsPointer(member) 1694 # Mark param as local if it is an array of NDOs 1695 islocal = False; 1696 if self.isHandleTypeNonDispatchable(type) == True: 1697 if (len is not None) and (isconst == True): 1698 islocal = True 1699 # Or if it's a struct that contains an NDO 1700 elif type in struct_member_dict: 1701 if self.struct_contains_ndo(type) == True: 1702 islocal = True 1703 isdestroy = True if True in [destroy_txt in cmdname for destroy_txt in ['Destroy', 'Free']] else False 1704 iscreate = True if True in [create_txt in cmdname for create_txt in ['Create', 'Allocate', 'GetRandROutputDisplayEXT', 'RegisterDeviceEvent', 'RegisterDisplayEvent']] else False 1705 extstructs = self.registry.validextensionstructs[type] if name == 'pNext' else None 1706 membersInfo.append(self.CommandParam(type=type, 1707 name=name, 1708 ispointer=ispointer, 1709 isconst=isconst, 1710 iscount=iscount, 1711 len=len, 1712 extstructs=extstructs, 1713 cdecl=cdecl, 1714 islocal=islocal, 1715 iscreate=iscreate, 1716 isdestroy=isdestroy, 1717 feature_protect=self.featureExtraProtect)) 1718 self.cmdMembers.append(self.CmdMemberData(name=cmdname, members=membersInfo)) 1719 self.cmd_info_data.append(self.CmdInfoData(name=cmdname, cmdinfo=cmdinfo)) 1720 self.cmd_feature_protect.append(self.CmdExtraProtect(name=cmdname, extra_protect=self.featureExtraProtect)) 1721 # 1722 # Create prototype for dispatch header file 1723 def GenDispatchFunctionPrototype(self, cmdinfo, ifdef_text): 1724 decls = self.makeCDecls(cmdinfo.elem) 1725 func_sig = decls[0][:-1] 1726 func_sig = func_sig.replace("VKAPI_ATTR ", "") 1727 func_sig = func_sig.replace("VKAPI_CALL ", "Dispatch") 1728 func_sig = func_sig.replace("(", "(ValidationObject *layer_data, ") 1729 func_sig += ';' 1730 dispatch_prototype = '' 1731 if ifdef_text is not None: 1732 dispatch_prototype = '#ifdef %s\n' % ifdef_text 1733 dispatch_prototype += func_sig 1734 if ifdef_text is not None: 1735 dispatch_prototype += '\n#endif // %s' % ifdef_text 1736 return dispatch_prototype 1737 # 1738 # Create code to wrap NDOs as well as handling some boilerplate code 1739 def WrapCommands(self): 1740 cmd_member_dict = dict(self.cmdMembers) 1741 cmd_info_dict = dict(self.cmd_info_data) 1742 cmd_protect_dict = dict(self.cmd_feature_protect) 1743 1744 for api_call in self.cmdMembers: 1745 cmdname = api_call.name 1746 cmdinfo = cmd_info_dict[api_call.name] 1747 feature_extra_protect = cmd_protect_dict[api_call.name] 1748 1749 # Add fuction prototype to header data 1750 self.appendSection('header_file', self.GenDispatchFunctionPrototype(cmdinfo, feature_extra_protect)) 1751 1752 if cmdname in self.no_autogen_list: 1753 decls = self.makeCDecls(cmdinfo.elem) 1754 self.appendSection('source_file', '') 1755 self.appendSection('source_file', '// Skip %s dispatch, manually generated' % cmdname) 1756 continue 1757 1758 # Generate NDO wrapping/unwrapping code for all parameters 1759 (api_decls, api_pre, api_post) = self.generate_wrapping_code(cmdinfo.elem) 1760 # If API doesn't contain NDO's, we still need to make a down-chain call 1761 down_chain_call_only = False 1762 if not api_decls and not api_pre and not api_post: 1763 down_chain_call_only = True 1764 if (feature_extra_protect is not None): 1765 self.appendSection('source_file', '') 1766 self.appendSection('source_file', '#ifdef ' + feature_extra_protect) 1767 1768 decls = self.makeCDecls(cmdinfo.elem) 1769 func_sig = decls[0][:-1] 1770 func_sig = func_sig.replace("VKAPI_ATTR ", "") 1771 func_sig = func_sig.replace("VKAPI_CALL ", "Dispatch") 1772 func_sig = func_sig.replace("(", "(ValidationObject *layer_data, ") 1773 self.appendSection('source_file', '') 1774 self.appendSection('source_file', func_sig) 1775 self.appendSection('source_file', '{') 1776 # Setup common to call wrappers, first parameter is always dispatchable 1777 dispatchable_type = cmdinfo.elem.find('param/type').text 1778 dispatchable_name = cmdinfo.elem.find('param/name').text 1779 1780 # Gather the parameter items 1781 params = cmdinfo.elem.findall('param/name') 1782 # Pull out the text for each of the parameters, separate them by commas in a list 1783 paramstext = ', '.join([str(param.text) for param in params]) 1784 wrapped_paramstext = paramstext 1785 # If any of these paramters has been replaced by a local var, fix up the list 1786 params = cmd_member_dict[cmdname] 1787 for param in params: 1788 if param.islocal == True or self.StructWithExtensions(param.type): 1789 if param.ispointer == True: 1790 wrapped_paramstext = wrapped_paramstext.replace(param.name, '(%s %s*)local_%s' % ('const', param.type, param.name)) 1791 else: 1792 wrapped_paramstext = wrapped_paramstext.replace(param.name, '(%s %s)local_%s' % ('const', param.type, param.name)) 1793 1794 # First, add check and down-chain call. Use correct dispatch table 1795 dispatch_table_type = "device_dispatch_table" 1796 if dispatchable_type in ["VkPhysicalDevice", "VkInstance"]: 1797 dispatch_table_type = "instance_dispatch_table" 1798 1799 api_func = cmdinfo.elem.attrib.get('name').replace('vk','layer_data->%s.',1) % dispatch_table_type 1800 1801 # Put all this together for the final down-chain call 1802 if not down_chain_call_only: 1803 unwrapped_dispatch_call = api_func + '(' + paramstext + ')' 1804 self.appendSection('source_file', ' if (!wrap_handles) return %s;' % unwrapped_dispatch_call) 1805 1806 # Handle return values, if any 1807 resulttype = cmdinfo.elem.find('proto/type') 1808 if (resulttype is not None and resulttype.text == 'void'): 1809 resulttype = None 1810 if (resulttype is not None): 1811 assignresult = resulttype.text + ' result = ' 1812 else: 1813 assignresult = '' 1814 # Pre-pend declarations and pre-api-call codegen 1815 if api_decls: 1816 self.appendSection('source_file', "\n".join(str(api_decls).rstrip().split("\n"))) 1817 if api_pre: 1818 self.appendSection('source_file', "\n".join(str(api_pre).rstrip().split("\n"))) 1819 # Generate the wrapped dispatch call 1820 self.appendSection('source_file', ' ' + assignresult + api_func + '(' + wrapped_paramstext + ');') 1821 1822 # And add the post-API-call codegen 1823 self.appendSection('source_file', "\n".join(str(api_post).rstrip().split("\n"))) 1824 # Handle the return result variable, if any 1825 if (resulttype is not None): 1826 self.appendSection('source_file', ' return result;') 1827 self.appendSection('source_file', '}') 1828 if (feature_extra_protect is not None): 1829 self.appendSection('source_file', '#endif // '+ feature_extra_protect) 1830 1831