/*
 * Copyright © 2024 Valve Corp.
 * SPDX-License-Identifier: MIT
 */

#include "nvk_device.h"
#include "nvk_device_memory.h"
#include "nvk_entrypoints.h"
#include "nvk_format.h"
#include "nvk_image.h"

#include "vk_format.h"

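/* Convert a Vulkan image offset plus base array layer into NIL's 4D pixel
 * offset, sanitizing the offset for the image's dimensionality first.
 */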
static struct nil_Offset4D_Pixels
vk_to_nil_offset(const struct vk_image *image, VkOffset3D offset,
                 uint32_t base_array_layer)
{
   const VkOffset3D sanitized_offset =
      vk_image_sanitize_offset(image, offset);
   return (struct nil_Offset4D_Pixels) {
      .x = sanitized_offset.x,
      .y = sanitized_offset.y,
      .z = sanitized_offset.z,
      .a = base_array_layer
   };
}

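/* Convert a Vulkan image extent plus array layer count into NIL's 4D pixel
 * extent, sanitizing the extent for the image's dimensionality first.
 */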
static struct nil_Extent4D_Pixels
vk_to_nil_extent(const struct vk_image *image, VkExtent3D extent,
                 uint32_t array_layers)
{
   const VkExtent3D sanitized_extent =
      vk_image_sanitize_extent(image, extent);
   return (struct nil_Extent4D_Pixels) {
      .width = sanitized_extent.width,
      .height = sanitized_extent.height,
      .depth = sanitized_extent.depth,
      .array_len = array_layers,
   };
}

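/* Copy a width_B x height block of bytes between two linear layouts with
 * potentially different row strides.  When both strides equal the row width,
 * the rows are contiguous and the copy collapses into a single memcpy.
 */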
static void
memcpy2d(void *dst, size_t dst_stride_B,
         const void *src, size_t src_stride_B,
         size_t width_B, size_t height)
{
   if (dst_stride_B == width_B && src_stride_B == width_B) {
      memcpy(dst, src, width_B * height);
   } else {
      for (uint32_t y = 0; y < height; y++) {
         memcpy(dst, src, width_B);
         dst += dst_stride_B;
         src += src_stride_B;
      }
   }
}

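/* Map the host-visible memory backing an image plane and return a CPU
 * pointer to the start of the plane's data within that allocation.
 */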
static VkResult
nvk_image_plane_map(const struct nvk_image_plane *plane,
                    enum nvkmd_mem_map_flags map_flags,
                    void **map_out)
{
   struct nvk_device_memory *host_mem = plane->host_mem;
   VkResult result;

   result = nvkmd_mem_map(host_mem->mem, &host_mem->vk.base,
                          map_flags, NULL, map_out);
   if (result != VK_SUCCESS)
      return result;

   *map_out += plane->host_offset;

   return VK_SUCCESS;
}

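/* Undo nvk_image_plane_map() */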
static void
nvk_image_plane_unmap(const struct nvk_image_plane *plane)
{
   nvkmd_mem_unmap(plane->host_mem->mem, 0);
}

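/* CPU implementation of a single VkMemoryToImageCopyEXT region.  The
 * destination plane is mapped on the host and the data is written either as
 * a raw per-layer memcpy, a row-by-row copy for linear images, or a tiling
 * copy through NIL for block-linear images.
 */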
static VkResult
nvk_copy_memory_to_image(struct nvk_image *dst,
                         const VkMemoryToImageCopyEXT *info,
                         bool use_memcpy)
{
   VkResult result;

   struct vk_image_buffer_layout buffer_layout =
      vk_memory_to_image_copy_layout(&dst->vk, info);

   const VkImageAspectFlagBits aspects = info->imageSubresource.aspectMask;
   const uint8_t plane = nvk_image_aspects_to_plane(dst, aspects);
   const struct nvk_image_plane *dst_plane = &dst->planes[plane];

   const uint32_t layer_count =
      vk_image_subresource_layer_count(&dst->vk, &info->imageSubresource);
   const struct nil_Extent4D_Pixels extent_px =
      vk_to_nil_extent(&dst->vk, info->imageExtent, layer_count);
   const struct nil_Extent4D_Bytes extent_B =
      nil_extent4d_px_to_B(extent_px, dst_plane->nil.format,
                           dst_plane->nil.sample_layout);

   const struct nil_Offset4D_Pixels offset_px =
      vk_to_nil_offset(&dst->vk, info->imageOffset,
                       info->imageSubresource.baseArrayLayer);
   struct nil_Offset4D_Bytes offset_B =
      nil_offset4d_px_to_B(offset_px, dst_plane->nil.format,
                           dst_plane->nil.sample_layout);

   const uint32_t dst_miplevel = info->imageSubresource.mipLevel;
   const struct nil_image_level *dst_level =
      &dst_plane->nil.levels[dst_miplevel];

   const void *src_ptr = info->pHostPointer;

   void *dst_ptr;
   result = nvk_image_plane_map(dst_plane, NVKMD_MEM_MAP_WR, &dst_ptr);
   if (result != VK_SUCCESS)
      return result;

   /* Take into account the miplevel and array layer */
   dst_ptr += dst_level->offset_B;
   dst_ptr += offset_B.a * dst_plane->nil.array_stride_B;
   offset_B.a = 0;

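   /* Pick the copy path: a raw per-layer memcpy when the caller requested
    * VK_HOST_IMAGE_COPY_MEMCPY_EXT, a simple row copy for linear images, or
    * a tiling copy through NIL for block-linear images.
    */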
   if (use_memcpy) {
      const uint64_t layer_size_B =
         nil_image_level_layer_size_B(&dst_plane->nil, dst_miplevel);
      for (unsigned a = 0; a < layer_count; a++) {
         memcpy(dst_ptr, src_ptr, layer_size_B);

         src_ptr += layer_size_B;
         dst_ptr += dst_plane->nil.array_stride_B;
      }
   } else if (dst_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR) {
      assert(layer_count == 1);
      memcpy2d(dst_ptr + offset_B.x + offset_B.y * dst_level->row_stride_B,
               dst_level->row_stride_B,
               src_ptr,
               buffer_layout.row_stride_B,
               extent_B.width,
               extent_B.height);
   } else {
      const struct nil_Extent4D_Pixels level_extent_px =
         nil_image_level_extent_px(&dst_plane->nil, dst_miplevel);
      struct nil_Extent4D_Bytes level_extent_B =
         nil_extent4d_px_to_B(level_extent_px, dst_plane->nil.format,
                              dst_plane->nil.sample_layout);
      level_extent_B.array_len = 1;

      /* The copy works one array layer at a time */
      assert(offset_B.a == 0);
      struct nil_Extent4D_Bytes copy_extent_B = extent_B;
      copy_extent_B.array_len = 1;

      for (unsigned a = 0; a < layer_count; a++) {
         nil_copy_linear_to_tiled(dst_ptr,
                                  level_extent_B,
                                  src_ptr,
                                  buffer_layout.row_stride_B,
                                  buffer_layout.image_stride_B,
                                  offset_B,
                                  copy_extent_B,
                                  &dst_level->tiling);

         src_ptr += buffer_layout.image_stride_B;
         dst_ptr += dst_plane->nil.array_stride_B;
      }
   }

   nvk_image_plane_unmap(dst_plane);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CopyMemoryToImageEXT(VkDevice _device,
                         const VkCopyMemoryToImageInfoEXT *info)
{
   VK_FROM_HANDLE(nvk_image, dst_image, info->dstImage);

   VkResult result = VK_SUCCESS;

   /* From the EXT spec:
    * VK_HOST_IMAGE_COPY_MEMCPY_EXT specifies that no memory layout swizzling
    * is to be applied during data copy. For copies between memory and images,
    * this flag indicates that image data in host memory is swizzled in exactly
    * the same way as the image data on the device. Using this flag indicates
    * that the implementations may use a simple memory copy to transfer the
    * data between the host memory and the device memory. The format of the
    * swizzled data in host memory is platform dependent and is not defined in
    * this specification.
    */
   const bool use_memcpy = info->flags &
                           VK_HOST_IMAGE_COPY_MEMCPY_EXT;

   for (unsigned r = 0; r < info->regionCount; r++) {
      result = nvk_copy_memory_to_image(dst_image, &info->pRegions[r],
                                        use_memcpy);
      if (result != VK_SUCCESS)
         return result;
   }

   return result;
}

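/* CPU implementation of a single VkImageToMemoryCopyEXT region.  This is the
 * mirror of nvk_copy_memory_to_image() with the copy direction reversed.
 */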
static VkResult
nvk_copy_image_to_memory(struct nvk_image *src,
                         const VkImageToMemoryCopyEXT *info,
                         bool use_memcpy)
{
   VkResult result;

   struct vk_image_buffer_layout buffer_layout =
      vk_image_to_memory_copy_layout(&src->vk, info);

   const VkImageAspectFlagBits aspects = info->imageSubresource.aspectMask;
   const uint8_t plane = nvk_image_aspects_to_plane(src, aspects);
   struct nvk_image_plane *src_plane = &src->planes[plane];

   const uint32_t layer_count =
      vk_image_subresource_layer_count(&src->vk, &info->imageSubresource);
   const struct nil_Extent4D_Pixels extent_px =
      vk_to_nil_extent(&src->vk, info->imageExtent, layer_count);
   const struct nil_Extent4D_Bytes extent_B =
      nil_extent4d_px_to_B(extent_px, src_plane->nil.format,
                           src_plane->nil.sample_layout);

   const struct nil_Offset4D_Pixels offset_px =
      vk_to_nil_offset(&src->vk, info->imageOffset,
                       info->imageSubresource.baseArrayLayer);
   struct nil_Offset4D_Bytes offset_B =
      nil_offset4d_px_to_B(offset_px, src_plane->nil.format,
                           src_plane->nil.sample_layout);

   const uint32_t src_miplevel = info->imageSubresource.mipLevel;
   const struct nil_image_level *src_level =
      &src_plane->nil.levels[src_miplevel];

   void *dst_ptr = info->pHostPointer;

   const void *src_ptr;
   result = nvk_image_plane_map(src_plane, NVKMD_MEM_MAP_RD, (void **)&src_ptr);
   if (result != VK_SUCCESS)
      return result;

   /* Take into account the miplevel and array layer */
   src_ptr += src_level->offset_B;
   src_ptr += offset_B.a * src_plane->nil.array_stride_B;
   offset_B.a = 0;

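   /* Same three copy paths as nvk_copy_memory_to_image(), reading from the
    * image instead of writing to it.
    */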
   if (use_memcpy) {
      const uint64_t layer_size_B =
         nil_image_level_layer_size_B(&src_plane->nil, src_miplevel);
      for (unsigned a = 0; a < layer_count; a++) {
         memcpy(dst_ptr, src_ptr, layer_size_B);

         src_ptr += src_plane->nil.array_stride_B;
         dst_ptr += layer_size_B;
      }
   } else if (src_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR) {
      assert(layer_count == 1);
      memcpy2d(dst_ptr,
               buffer_layout.row_stride_B,
               src_ptr + offset_B.x + offset_B.y * src_level->row_stride_B,
               src_level->row_stride_B,
               extent_B.width,
               extent_B.height);
   } else {
      const struct nil_Extent4D_Pixels level_extent_px =
         nil_image_level_extent_px(&src_plane->nil, src_miplevel);
      struct nil_Extent4D_Bytes level_extent_B =
         nil_extent4d_px_to_B(level_extent_px, src_plane->nil.format,
                              src_plane->nil.sample_layout);
      level_extent_B.array_len = 1;

      /* The copy works one array layer at a time */
      assert(offset_B.a == 0);
      struct nil_Extent4D_Bytes copy_extent_B = extent_B;
      copy_extent_B.array_len = 1;

      for (unsigned a = 0; a < layer_count; a++) {
         nil_copy_tiled_to_linear(dst_ptr,
                                  buffer_layout.row_stride_B,
                                  buffer_layout.image_stride_B,
                                  src_ptr,
                                  level_extent_B,
                                  offset_B,
                                  copy_extent_B,
                                  &src_level->tiling);

         src_ptr += src_plane->nil.array_stride_B;
         dst_ptr += buffer_layout.image_stride_B;
      }
   }

   nvk_image_plane_unmap(src_plane);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CopyImageToMemoryEXT(VkDevice _device,
                         const VkCopyImageToMemoryInfoEXT *info)
{
   VK_FROM_HANDLE(nvk_image, image, info->srcImage);

   VkResult result = VK_SUCCESS;

   const bool use_memcpy = info->flags &
                           VK_HOST_IMAGE_COPY_MEMCPY_EXT;

   for (unsigned r = 0; r < info->regionCount; r++) {
      result = nvk_copy_image_to_memory(image, &info->pRegions[r],
                                        use_memcpy);
      if (result != VK_SUCCESS)
         return result;
   }

   return result;
}

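/* CPU implementation of a single VkImageCopy2 region for
 * vkCopyImageToImageEXT.  Both the source and destination planes are mapped
 * on the host and the copy is performed directly between the two mappings.
 */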
static VkResult
nvk_copy_image_to_image(struct nvk_device *dev,
                        struct nvk_image *src,
                        struct nvk_image *dst,
                        const VkImageCopy2 *info)
{
   VkResult result;

   const VkImageAspectFlagBits src_aspects =
      info->srcSubresource.aspectMask;
   const uint8_t src_plane = nvk_image_aspects_to_plane(src, src_aspects);
   struct nvk_image_plane *src_img_plane = &src->planes[src_plane];

   const VkImageAspectFlagBits dst_aspects =
      info->dstSubresource.aspectMask;
   const uint8_t dst_plane = nvk_image_aspects_to_plane(dst, dst_aspects);
   struct nvk_image_plane *dst_img_plane = &dst->planes[dst_plane];

   /* From the Vulkan 1.3.217 spec:
    *
    *    "When copying between compressed and uncompressed formats the
    *     extent members represent the texel dimensions of the source image
    *     and not the destination."
    */
   const uint32_t src_layer_count =
      vk_image_subresource_layer_count(&src->vk, &info->srcSubresource);
   const struct nil_Extent4D_Pixels src_extent_px =
      vk_to_nil_extent(&src->vk, info->extent, src_layer_count);
   struct nil_Extent4D_Bytes src_extent_B =
      nil_extent4d_px_to_B(src_extent_px, src_img_plane->nil.format,
                           src_img_plane->nil.sample_layout);

   const struct nil_Offset4D_Pixels src_offset_px =
      vk_to_nil_offset(&src->vk, info->srcOffset,
                       info->srcSubresource.baseArrayLayer);
   struct nil_Offset4D_Bytes src_offset_B =
      nil_offset4d_px_to_B(src_offset_px, src_img_plane->nil.format,
                           src_img_plane->nil.sample_layout);

   const uint32_t dst_layer_count =
      vk_image_subresource_layer_count(&dst->vk, &info->dstSubresource);
   const struct nil_Extent4D_Pixels dst_extent_px =
      vk_to_nil_extent(&dst->vk, info->extent, dst_layer_count);
   /* The source format is used here in case of compressed images (see the
    * comment above).
    */
   struct nil_Extent4D_Bytes dst_extent_B =
      nil_extent4d_px_to_B(dst_extent_px, src_img_plane->nil.format,
                           dst_img_plane->nil.sample_layout);

   const struct nil_Offset4D_Pixels dst_offset_px =
      vk_to_nil_offset(&dst->vk, info->dstOffset,
                       info->dstSubresource.baseArrayLayer);
   struct nil_Offset4D_Bytes dst_offset_B =
      nil_offset4d_px_to_B(dst_offset_px, dst_img_plane->nil.format,
                           dst_img_plane->nil.sample_layout);

   const uint32_t src_miplevel = info->srcSubresource.mipLevel;
   const struct nil_image_level *src_level =
      &src_img_plane->nil.levels[src_miplevel];

   const uint32_t dst_miplevel = info->dstSubresource.mipLevel;
   const struct nil_image_level *dst_level =
      &dst_img_plane->nil.levels[dst_miplevel];

   const void *src_ptr;
   result = nvk_image_plane_map(src_img_plane, NVKMD_MEM_MAP_RD,
                                (void **)&src_ptr);
   if (result != VK_SUCCESS)
      return result;

   /* Take into account the miplevel and array layer */
   src_ptr += src_level->offset_B;
   src_ptr += src_offset_B.a * src_img_plane->nil.array_stride_B;
   src_offset_B.a = 0;

   void *dst_ptr;
   result = nvk_image_plane_map(dst_img_plane, NVKMD_MEM_MAP_WR, &dst_ptr);
   if (result != VK_SUCCESS) {
      nvk_image_plane_unmap(src_img_plane);
      return result;
   }

   /* Take into account the miplevel and array layer */
   dst_ptr += dst_level->offset_B;
   dst_ptr += dst_offset_B.a * dst_img_plane->nil.array_stride_B;
   dst_offset_B.a = 0;

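   /* Linear images are restricted to single-layer 2D, so a linear source or
    * destination implies a single 2D slice copy.
    */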
   if (src_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR) {
      assert(src_img_plane->nil.dim == NIL_IMAGE_DIM_2D);
      assert(src_img_plane->nil.extent_px.array_len == 1);
      assert(src_extent_px.depth == 1 && src_extent_px.array_len == 1);
   }

   if (dst_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR) {
      assert(dst_img_plane->nil.dim == NIL_IMAGE_DIM_2D);
      assert(dst_img_plane->nil.extent_px.array_len == 1);
      assert(dst_extent_px.depth == 1 && dst_extent_px.array_len == 1);
   }

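   /* Four cases based on the tiling of each image: linear-to-linear is a
    * plain 2D copy, a single linear side goes directly through the NIL
    * tiling helpers, and tiled-to-tiled bounces through a temporary linear
    * buffer.
    */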
   if (src_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR &&
       dst_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR) {
      memcpy2d(dst_ptr + dst_offset_B.x +
               dst_offset_B.y * dst_level->row_stride_B,
               dst_level->row_stride_B,
               src_ptr + src_offset_B.x +
               src_offset_B.y * src_level->row_stride_B,
               src_level->row_stride_B,
               src_extent_B.width,
               src_extent_B.height);
   } else if (src_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR) {
      const struct nil_Extent4D_Pixels dst_level_extent_px =
         nil_image_level_extent_px(&dst_img_plane->nil, dst_miplevel);
      const struct nil_Extent4D_Bytes dst_level_extent_B =
         nil_extent4d_px_to_B(dst_level_extent_px, dst_img_plane->nil.format,
                              dst_img_plane->nil.sample_layout);

      nil_copy_linear_to_tiled(dst_ptr,
                               dst_level_extent_B,
                               src_ptr + src_offset_B.x +
                               src_offset_B.y * src_level->row_stride_B,
                               src_level->row_stride_B,
                               0, /* No array slices */
                               dst_offset_B,
                               dst_extent_B,
                               &dst_level->tiling);
   } else if (dst_level->tiling.gob_type == NIL_GOB_TYPE_LINEAR) {
      const struct nil_Extent4D_Pixels src_level_extent_px =
         nil_image_level_extent_px(&src_img_plane->nil, src_miplevel);
      const struct nil_Extent4D_Bytes src_level_extent_B =
         nil_extent4d_px_to_B(src_level_extent_px, src_img_plane->nil.format,
                              src_img_plane->nil.sample_layout);

      nil_copy_tiled_to_linear(dst_ptr + dst_offset_B.x +
                               dst_offset_B.y * dst_level->row_stride_B,
                               dst_level->row_stride_B,
                               0, /* No array slices */
                               src_ptr,
                               src_level_extent_B,
                               src_offset_B,
                               src_extent_B,
                               &src_level->tiling);
   } else {
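      /* Neither image is linear, so detile the source region into a tightly
       * packed temporary buffer and then tile it back out into the
       * destination.
       */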
      assert(src_extent_B.width == dst_extent_B.width);
      assert(src_extent_B.height == dst_extent_B.height);
      const uint32_t tmp_row_stride_B = src_extent_B.width;
      const uint32_t tmp_layer_stride_B = src_extent_B.width *
                                          src_extent_B.height;

      assert(src_extent_B.depth * src_extent_B.array_len ==
             dst_extent_B.depth * dst_extent_B.array_len);
      const size_t tmp_size_B =
         src_extent_B.depth * src_extent_B.array_len * tmp_layer_stride_B;
      void *tmp_mem = vk_alloc(&dev->vk.alloc, tmp_size_B, 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (tmp_mem == NULL) {
         nvk_image_plane_unmap(src_img_plane);
         nvk_image_plane_unmap(dst_img_plane);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }

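      /* First pass: detile every source layer into the temporary buffer. */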
      const struct nil_Extent4D_Pixels src_level_extent_px =
         nil_image_level_extent_px(&src_img_plane->nil, src_miplevel);
      struct nil_Extent4D_Bytes src_level_extent_B =
         nil_extent4d_px_to_B(src_level_extent_px, src_img_plane->nil.format,
                              src_img_plane->nil.sample_layout);
      src_level_extent_B.array_len = 1;

      /* The copy works one array layer at a time */
      assert(src_offset_B.a == 0);
      struct nil_Extent4D_Bytes src_copy_extent_B = src_extent_B;
      src_copy_extent_B.array_len = 1;

      void *tmp_dst = tmp_mem;
      for (unsigned a = 0; a < src_layer_count; a++) {
         nil_copy_tiled_to_linear(tmp_dst,
                                  tmp_row_stride_B,
                                  tmp_layer_stride_B,
                                  src_ptr,
                                  src_level_extent_B,
                                  src_offset_B,
                                  src_copy_extent_B,
                                  &src_level->tiling);

         src_ptr += src_img_plane->nil.array_stride_B;
         tmp_dst += tmp_layer_stride_B;
      }

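      /* Second pass: tile the temporary buffer out into the destination. */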
      const struct nil_Extent4D_Pixels dst_level_extent_px =
         nil_image_level_extent_px(&dst_img_plane->nil, dst_miplevel);
      struct nil_Extent4D_Bytes dst_level_extent_B =
         nil_extent4d_px_to_B(dst_level_extent_px, dst_img_plane->nil.format,
                              dst_img_plane->nil.sample_layout);
      dst_level_extent_B.array_len = 1;

      /* The copy works one array layer at a time */
      assert(dst_offset_B.a == 0);
      struct nil_Extent4D_Bytes dst_copy_extent_B = dst_extent_B;
      dst_copy_extent_B.array_len = 1;

      void *tmp_src = tmp_mem;
      for (unsigned a = 0; a < dst_layer_count; a++) {
         nil_copy_linear_to_tiled(dst_ptr,
                                  dst_level_extent_B,
                                  tmp_src,
                                  tmp_row_stride_B,
                                  tmp_layer_stride_B,
                                  dst_offset_B,
                                  dst_copy_extent_B,
                                  &dst_level->tiling);

         tmp_src += tmp_layer_stride_B;
         dst_ptr += dst_img_plane->nil.array_stride_B;
      }

      vk_free(&dev->vk.alloc, tmp_mem);
   }

   nvk_image_plane_unmap(src_img_plane);
   nvk_image_plane_unmap(dst_img_plane);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CopyImageToImageEXT(VkDevice _device,
                        const VkCopyImageToImageInfoEXT *pCopyImageToImageInfo)
{
   VK_FROM_HANDLE(nvk_device, device, _device);
   VK_FROM_HANDLE(nvk_image, src, pCopyImageToImageInfo->srcImage);
   VK_FROM_HANDLE(nvk_image, dst, pCopyImageToImageInfo->dstImage);

   VkResult result = VK_SUCCESS;

   for (unsigned r = 0; r < pCopyImageToImageInfo->regionCount; r++) {
      result = nvk_copy_image_to_image(device, src, dst,
                                       pCopyImageToImageInfo->pRegions + r);
      if (result != VK_SUCCESS)
         return result;
   }

   return result;
}

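/* Host image layout transitions are a no-op for NVK since the memory layout
 * of an image does not depend on its VkImageLayout.
 */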
VKAPI_ATTR VkResult VKAPI_CALL
nvk_TransitionImageLayoutEXT(VkDevice device,
                             uint32_t transitionCount,
                             const VkHostImageLayoutTransitionInfoEXT *transitions)
{
   /* Nothing to do here */
   return VK_SUCCESS;
}