// Copyright 2021 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef VK_STRUCT_CONVERSION_HPP_
#define VK_STRUCT_CONVERSION_HPP_

#include "VkMemory.hpp"
#include "VkStringify.hpp"
#include <cstring>
#include <new>  // placement new, used by SubmitInfo::Allocate
#include <vector>

// Helper structures that convert Vulkan 1.0-style command parameters into
// their extended *2 equivalents, taking ownership of the converted region
// and barrier arrays so the newer code paths can consume them directly.

namespace vk {

struct CopyBufferInfo : public VkCopyBufferInfo2
{
    CopyBufferInfo(VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions)
        : VkCopyBufferInfo2{
              VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2,
              nullptr,
              srcBuffer,
              dstBuffer,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_BUFFER_COPY_2,
                nullptr,
                pRegions[i].srcOffset,
                pRegions[i].dstOffset,
                pRegions[i].size
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkBufferCopy2> regions;
};

struct CopyImageInfo : public VkCopyImageInfo2
{
    CopyImageInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions)
        : VkCopyImageInfo2{
              VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_IMAGE_COPY_2,
                nullptr,
                pRegions[i].srcSubresource,
                pRegions[i].srcOffset,
                pRegions[i].dstSubresource,
                pRegions[i].dstOffset,
                pRegions[i].extent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkImageCopy2> regions;
};

struct BlitImageInfo : public VkBlitImageInfo2
{
    BlitImageInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter)
        : VkBlitImageInfo2{
              VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr,
              filter
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
                nullptr,
                pRegions[i].srcSubresource,
                { pRegions[i].srcOffsets[0], pRegions[i].srcOffsets[1] },
                pRegions[i].dstSubresource,
                { pRegions[i].dstOffsets[0], pRegions[i].dstOffsets[1] }
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkImageBlit2> regions;
};

struct CopyBufferToImageInfo : public VkCopyBufferToImageInfo2
{
    CopyBufferToImageInfo(VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions)
        : VkCopyBufferToImageInfo2{
              VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2,
              nullptr,
              srcBuffer,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
                nullptr,
                pRegions[i].bufferOffset,
                pRegions[i].bufferRowLength,
                pRegions[i].bufferImageHeight,
                pRegions[i].imageSubresource,
                pRegions[i].imageOffset,
                pRegions[i].imageExtent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkBufferImageCopy2> regions;
};

struct CopyImageToBufferInfo : public VkCopyImageToBufferInfo2
{
    CopyImageToBufferInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions)
        : VkCopyImageToBufferInfo2{
              VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstBuffer,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
                nullptr,
                pRegions[i].bufferOffset,
                pRegions[i].bufferRowLength,
                pRegions[i].bufferImageHeight,
                pRegions[i].imageSubresource,
                pRegions[i].imageOffset,
                pRegions[i].imageExtent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkBufferImageCopy2> regions;
};

struct ResolveImageInfo : public VkResolveImageInfo2
{
    ResolveImageInfo(VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions)
        : VkResolveImageInfo2{
              VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2,
              nullptr,
              srcImage,
              srcImageLayout,
              dstImage,
              dstImageLayout,
              regionCount,
              nullptr
          }
    {
        regions.resize(regionCount);
        for(uint32_t i = 0; i < regionCount; i++)
        {
            regions[i] = {
                VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
                nullptr,
                pRegions[i].srcSubresource,
                pRegions[i].srcOffset,
                pRegions[i].dstSubresource,
                pRegions[i].dstOffset,
                pRegions[i].extent
            };
        }

        this->pRegions = &regions.front();
    }

private:
    std::vector<VkImageResolve2> regions;
};

struct DependencyInfo : public VkDependencyInfo
{
    DependencyInfo(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                   VkDependencyFlags dependencyFlags,
                   uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
        : VkDependencyInfo{
              VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
              nullptr,
              dependencyFlags,
              memoryBarrierCount,
              nullptr,
              bufferMemoryBarrierCount,
              nullptr,
              imageMemoryBarrierCount,
              nullptr
          }
    {
        if((memoryBarrierCount == 0) &&
           (bufferMemoryBarrierCount == 0) &&
           (imageMemoryBarrierCount == 0))
        {
            // Create a single memory barrier entry to store the source and destination stage masks
            memoryBarriers.resize(1);
            memoryBarriers[0] = {
                VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
                nullptr,
                srcStageMask,
                VK_ACCESS_2_NONE,
                dstStageMask,
                VK_ACCESS_2_NONE
            };
        }
        else
        {
            memoryBarriers.resize(memoryBarrierCount);
            for(uint32_t i = 0; i < memoryBarrierCount; i++)
            {
                memoryBarriers[i] = {
                    VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
                    pMemoryBarriers[i].pNext,
                    srcStageMask,
                    pMemoryBarriers[i].srcAccessMask,
                    dstStageMask,
                    pMemoryBarriers[i].dstAccessMask
                };
            }

            bufferMemoryBarriers.resize(bufferMemoryBarrierCount);
            for(uint32_t i = 0; i < bufferMemoryBarrierCount; i++)
            {
                bufferMemoryBarriers[i] = {
                    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
                    pBufferMemoryBarriers[i].pNext,
                    srcStageMask,
                    pBufferMemoryBarriers[i].srcAccessMask,
                    dstStageMask,
                    pBufferMemoryBarriers[i].dstAccessMask,
                    pBufferMemoryBarriers[i].srcQueueFamilyIndex,
                    pBufferMemoryBarriers[i].dstQueueFamilyIndex,
                    pBufferMemoryBarriers[i].buffer,
                    pBufferMemoryBarriers[i].offset,
                    pBufferMemoryBarriers[i].size
                };
            }

            imageMemoryBarriers.resize(imageMemoryBarrierCount);
            for(uint32_t i = 0; i < imageMemoryBarrierCount; i++)
            {
                imageMemoryBarriers[i] = {
                    VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
                    pImageMemoryBarriers[i].pNext,
                    srcStageMask,
                    pImageMemoryBarriers[i].srcAccessMask,
                    dstStageMask,
                    pImageMemoryBarriers[i].dstAccessMask,
                    pImageMemoryBarriers[i].oldLayout,
                    pImageMemoryBarriers[i].newLayout,
                    pImageMemoryBarriers[i].srcQueueFamilyIndex,
                    pImageMemoryBarriers[i].dstQueueFamilyIndex,
                    pImageMemoryBarriers[i].image,
                    pImageMemoryBarriers[i].subresourceRange
                };
            }
        }

        this->pMemoryBarriers = memoryBarriers.empty() ? nullptr : &memoryBarriers.front();
        this->pBufferMemoryBarriers = bufferMemoryBarriers.empty() ? nullptr : &bufferMemoryBarriers.front();
        this->pImageMemoryBarriers = imageMemoryBarriers.empty() ? nullptr : &imageMemoryBarriers.front();
    }

private:
    std::vector<VkMemoryBarrier2> memoryBarriers;
    std::vector<VkBufferMemoryBarrier2> bufferMemoryBarriers;
    std::vector<VkImageMemoryBarrier2> imageMemoryBarriers;
};

struct ImageSubresource : VkImageSubresource
{
    ImageSubresource(const VkImageSubresourceLayers &subresourceLayers)
        : VkImageSubresource{
              subresourceLayers.aspectMask,
              subresourceLayers.mipLevel,
              subresourceLayers.baseArrayLayer
          }
    {}
};

struct ImageSubresourceRange : VkImageSubresourceRange
{
    ImageSubresourceRange(const VkImageSubresourceLayers &subresourceLayers)
        : VkImageSubresourceRange{
              subresourceLayers.aspectMask,
              subresourceLayers.mipLevel,
              1,
              subresourceLayers.baseArrayLayer,
              subresourceLayers.layerCount
          }
    {}
};

struct Extent2D : VkExtent2D
{
    Extent2D(const VkExtent3D &extent3D)
        : VkExtent2D{ extent3D.width, extent3D.height }
    {}
};

// Flattened submit information, built from either VkSubmitInfo or VkSubmitInfo2
// (including any timeline semaphore values), packed into a single host allocation.
struct SubmitInfo
{
    static SubmitInfo *Allocate(uint32_t submitCount, const VkSubmitInfo *pSubmits)
    {
        size_t submitSize = sizeof(SubmitInfo) * submitCount;
        size_t totalSize = submitSize;
        for(uint32_t i = 0; i < submitCount; i++)
        {
            totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
            totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
            totalSize += pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
            totalSize += pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);

            for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
                extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
            {
                switch(extension->sType)
                {
                case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
                    {
                        const auto *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
                        totalSize += tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t);
                        totalSize += tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
                    }
                    break;
                case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
                    // SwiftShader doesn't use device group submit info because it only supports a single physical device.
                    // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
                    break;
                case VK_STRUCTURE_TYPE_MAX_ENUM:
                    // dEQP tests that this value is ignored.
                    break;
                default:
                    UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
                    break;
                }
            }
        }

        uint8_t *mem = static_cast<uint8_t *>(
            vk::allocateHostMemory(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::NULL_ALLOCATION_CALLBACKS, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));

        auto submits = new(mem) SubmitInfo[submitCount];
        mem += submitSize;

        for(uint32_t i = 0; i < submitCount; i++)
        {
            submits[i].commandBufferCount = pSubmits[i].commandBufferCount;
            submits[i].signalSemaphoreCount = pSubmits[i].signalSemaphoreCount;
            submits[i].waitSemaphoreCount = pSubmits[i].waitSemaphoreCount;

            submits[i].pWaitSemaphores = nullptr;
            submits[i].pWaitDstStageMask = nullptr;
            submits[i].pSignalSemaphores = nullptr;
            submits[i].pCommandBuffers = nullptr;

            if(pSubmits[i].waitSemaphoreCount > 0)
            {
                size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pWaitSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                memcpy(mem, pSubmits[i].pWaitSemaphores, size);
                mem += size;

                size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
                submits[i].pWaitDstStageMask = reinterpret_cast<VkPipelineStageFlags *>(mem);
                memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
                mem += size;
            }

            if(pSubmits[i].signalSemaphoreCount > 0)
            {
                size_t size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pSignalSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                memcpy(mem, pSubmits[i].pSignalSemaphores, size);
                mem += size;
            }

            if(pSubmits[i].commandBufferCount > 0)
            {
                size_t size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
                submits[i].pCommandBuffers = reinterpret_cast<VkCommandBuffer *>(mem);
                memcpy(mem, pSubmits[i].pCommandBuffers, size);
                mem += size;
            }

            submits[i].waitSemaphoreValueCount = 0;
            submits[i].pWaitSemaphoreValues = nullptr;
            submits[i].signalSemaphoreValueCount = 0;
            submits[i].pSignalSemaphoreValues = nullptr;

            for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
                extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
            {
                switch(extension->sType)
                {
                case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
                    {
                        const VkTimelineSemaphoreSubmitInfo *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);

                        if(tlsSubmitInfo->waitSemaphoreValueCount > 0)
                        {
                            submits[i].waitSemaphoreValueCount = tlsSubmitInfo->waitSemaphoreValueCount;
                            size_t size = tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t);
                            submits[i].pWaitSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                            memcpy(mem, tlsSubmitInfo->pWaitSemaphoreValues, size);
                            mem += size;
                        }

                        if(tlsSubmitInfo->signalSemaphoreValueCount > 0)
                        {
                            submits[i].signalSemaphoreValueCount = tlsSubmitInfo->signalSemaphoreValueCount;
                            size_t size = tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
                            submits[i].pSignalSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                            memcpy(mem, tlsSubmitInfo->pSignalSemaphoreValues, size);
                            mem += size;
                        }
                    }
                    break;
                case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
                    // SwiftShader doesn't use device group submit info because it only supports a single physical device.
                    // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
                    break;
                case VK_STRUCTURE_TYPE_MAX_ENUM:
                    // dEQP tests that this value is ignored.
                    break;
                default:
                    UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
                    break;
                }
            }
        }

        return submits;
    }

    static SubmitInfo *Allocate(uint32_t submitCount, const VkSubmitInfo2 *pSubmits)
    {
        size_t submitSize = sizeof(SubmitInfo) * submitCount;
        size_t totalSize = submitSize;
        for(uint32_t i = 0; i < submitCount; i++)
        {
            totalSize += pSubmits[i].waitSemaphoreInfoCount * sizeof(VkSemaphore);
            totalSize += pSubmits[i].waitSemaphoreInfoCount * sizeof(VkPipelineStageFlags);
            totalSize += pSubmits[i].waitSemaphoreInfoCount * sizeof(uint64_t);
            totalSize += pSubmits[i].signalSemaphoreInfoCount * sizeof(VkSemaphore);
            totalSize += pSubmits[i].signalSemaphoreInfoCount * sizeof(uint64_t);
            totalSize += pSubmits[i].commandBufferInfoCount * sizeof(VkCommandBuffer);

            for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
                extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
            {
                switch(extension->sType)
                {
                case VK_STRUCTURE_TYPE_MAX_ENUM:
                    // dEQP tests that this value is ignored.
                    break;
                case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:           // VK_KHR_performance_query
                case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:  // VK_KHR_win32_keyed_mutex
                case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:   // VK_NV_win32_keyed_mutex
                default:
                    UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
                    break;
                }
            }
        }

        uint8_t *mem = static_cast<uint8_t *>(
            vk::allocateHostMemory(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::NULL_ALLOCATION_CALLBACKS, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));

        auto submits = new(mem) SubmitInfo[submitCount];
        mem += submitSize;

        for(uint32_t i = 0; i < submitCount; i++)
        {
            submits[i].commandBufferCount = pSubmits[i].commandBufferInfoCount;
            submits[i].signalSemaphoreCount = pSubmits[i].signalSemaphoreInfoCount;
            submits[i].waitSemaphoreCount = pSubmits[i].waitSemaphoreInfoCount;

            submits[i].signalSemaphoreValueCount = pSubmits[i].signalSemaphoreInfoCount;
            submits[i].waitSemaphoreValueCount = pSubmits[i].waitSemaphoreInfoCount;

            submits[i].pWaitSemaphores = nullptr;
            submits[i].pWaitDstStageMask = nullptr;
            submits[i].pSignalSemaphores = nullptr;
            submits[i].pCommandBuffers = nullptr;
            submits[i].pWaitSemaphoreValues = nullptr;
            submits[i].pSignalSemaphoreValues = nullptr;

            if(submits[i].waitSemaphoreCount > 0)
            {
                size_t size = submits[i].waitSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pWaitSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                mem += size;

                size = submits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
                submits[i].pWaitDstStageMask = reinterpret_cast<VkPipelineStageFlags *>(mem);
                mem += size;

                size = submits[i].waitSemaphoreCount * sizeof(uint64_t);
                submits[i].pWaitSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                mem += size;

                for(uint32_t j = 0; j < submits[i].waitSemaphoreCount; j++)
                {
                    submits[i].pWaitSemaphores[j] = pSubmits[i].pWaitSemaphoreInfos[j].semaphore;
                    submits[i].pWaitDstStageMask[j] = pSubmits[i].pWaitSemaphoreInfos[j].stageMask;
                    submits[i].pWaitSemaphoreValues[j] = pSubmits[i].pWaitSemaphoreInfos[j].value;
                }
            }

            if(submits[i].signalSemaphoreCount > 0)
            {
                size_t size = submits[i].signalSemaphoreCount * sizeof(VkSemaphore);
                submits[i].pSignalSemaphores = reinterpret_cast<VkSemaphore *>(mem);
                mem += size;

                size = submits[i].signalSemaphoreCount * sizeof(uint64_t);
                submits[i].pSignalSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
                mem += size;

                for(uint32_t j = 0; j < submits[i].signalSemaphoreCount; j++)
                {
                    submits[i].pSignalSemaphores[j] = pSubmits[i].pSignalSemaphoreInfos[j].semaphore;
                    submits[i].pSignalSemaphoreValues[j] = pSubmits[i].pSignalSemaphoreInfos[j].value;
                }
            }

            if(submits[i].commandBufferCount > 0)
            {
                size_t size = submits[i].commandBufferCount * sizeof(VkCommandBuffer);
                submits[i].pCommandBuffers = reinterpret_cast<VkCommandBuffer *>(mem);
                mem += size;

                for(uint32_t j = 0; j < submits[i].commandBufferCount; j++)
                {
                    submits[i].pCommandBuffers[j] = pSubmits[i].pCommandBufferInfos[j].commandBuffer;
                }
            }
        }

        return submits;
    }

    static void Release(SubmitInfo *submitInfo)
    {
        vk::freeHostMemory(submitInfo, NULL_ALLOCATION_CALLBACKS);
    }

    uint32_t waitSemaphoreCount;
    VkSemaphore *pWaitSemaphores;
    VkPipelineStageFlags *pWaitDstStageMask;
    uint32_t commandBufferCount;
    VkCommandBuffer *pCommandBuffers;
    uint32_t signalSemaphoreCount;
    VkSemaphore *pSignalSemaphores;
    uint32_t waitSemaphoreValueCount;
    uint64_t *pWaitSemaphoreValues;
    uint32_t signalSemaphoreValueCount;
    uint64_t *pSignalSemaphoreValues;
};

}  // namespace vk

#endif  // VK_STRUCT_CONVERSION_HPP_
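
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of this header's API). These
// wrappers exist so Vulkan 1.0-style entry points can be funneled into the
// *2 code paths. The caller and method names below are hypothetical stand-ins
// for whatever *2-based command-buffer interface the driver provides:
//
//   void cmdCopyBuffer(vk::CommandBuffer *commandBuffer, VkBuffer srcBuffer,
//                      VkBuffer dstBuffer, uint32_t regionCount,
//                      const VkBufferCopy *pRegions)
//   {
//       // CopyBufferInfo converts the VkBufferCopy regions to VkBufferCopy2
//       // and keeps them alive in its member vector for the call's duration.
//       vk::CopyBufferInfo copyBufferInfo(srcBuffer, dstBuffer, regionCount, pRegions);
//       commandBuffer->copyBuffer(copyBufferInfo);  // hypothetical *2-based method
//   }
// ---------------------------------------------------------------------------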