/*
 * Copyright © 2019 Raspberry Pi Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3dv_private.h"

#include "drm-uapi/drm_fourcc.h"
#include "util/format/u_format.h"
#include "util/u_math.h"
#include "vk_util.h"
#include "vulkan/wsi/wsi_common.h"

/**
 * Computes the HW's UIFblock padding for a given height/cpp.
 *
 * The goal of the padding is to keep pages of the same color (bank number) at
 * least half a page away from each other vertically when crossing between
 * columns of UIF blocks.
 */
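/* Illustrative example, assuming the usual V3D constants (4096-byte pages,
 * 1024-byte UIF-block rows, 8 cache banks), which would give PAGE_UB_ROWS = 4,
 * PAGE_UB_ROWS_TIMES_1_5 = 6 and PAGE_CACHE_UB_ROWS = 32: a height of 35
 * UIF-block rows sits 3 rows into the page cache, so the code below would pad
 * by 6 - 3 = 3 rows to move the crossing at least half a page away.
 */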
static uint32_t
v3d_get_ub_pad(uint32_t cpp, uint32_t height)
{
   uint32_t utile_h = v3d_utile_height(cpp);
   uint32_t uif_block_h = utile_h * 2;
   uint32_t height_ub = height / uif_block_h;

   uint32_t height_offset_in_pc = height_ub % PAGE_CACHE_UB_ROWS;

   /* For the perfectly-aligned-for-UIF-XOR case, don't add any pad. */
   if (height_offset_in_pc == 0)
      return 0;

   /* Try padding up to where we're offset by at least half a page. */
   if (height_offset_in_pc < PAGE_UB_ROWS_TIMES_1_5) {
      /* If we fit entirely in the page cache, don't pad. */
      if (height_ub < PAGE_CACHE_UB_ROWS)
         return 0;
      else
         return PAGE_UB_ROWS_TIMES_1_5 - height_offset_in_pc;
   }

   /* If we're close to being aligned to page cache size, then round up
    * and rely on XOR.
    */
   if (height_offset_in_pc > PAGE_CACHE_MINUS_1_5_UB_ROWS)
      return PAGE_CACHE_UB_ROWS - height_offset_in_pc;

   /* Otherwise, we're far enough away (top and bottom) to not need any
    * padding.
    */
   return 0;
}

static void
v3d_setup_slices(struct v3dv_image *image)
{
   assert(image->cpp > 0);

   uint32_t width = image->vk.extent.width;
   uint32_t height = image->vk.extent.height;
   uint32_t depth = image->vk.extent.depth;

   /* Note that power-of-two padding is based on level 1. These are not
    * equivalent to just util_next_power_of_two(dimension), because at a
    * level 0 dimension of 9, the level 1 power-of-two padded value is 4,
    * not 8.
    */
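   /* Illustrative arithmetic for that example: u_minify(9, 1) == 4 and
    * util_next_power_of_two(4) == 4, whereas minifying
    * util_next_power_of_two(9) == 16 would have given 8. pot_width below
    * stores twice the level 1 value, so that u_minify(pot_width, i) keeps
    * minifying that level 1 power of two for levels i >= 2.
    */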
   uint32_t pot_width = 2 * util_next_power_of_two(u_minify(width, 1));
   uint32_t pot_height = 2 * util_next_power_of_two(u_minify(height, 1));
   uint32_t pot_depth = 2 * util_next_power_of_two(u_minify(depth, 1));

   uint32_t utile_w = v3d_utile_width(image->cpp);
   uint32_t utile_h = v3d_utile_height(image->cpp);
   uint32_t uif_block_w = utile_w * 2;
   uint32_t uif_block_h = utile_h * 2;

   uint32_t block_width = vk_format_get_blockwidth(image->vk.format);
   uint32_t block_height = vk_format_get_blockheight(image->vk.format);

   assert(image->vk.samples == VK_SAMPLE_COUNT_1_BIT ||
          image->vk.samples == VK_SAMPLE_COUNT_4_BIT);
   bool msaa = image->vk.samples != VK_SAMPLE_COUNT_1_BIT;

   bool uif_top = msaa;

   assert(image->vk.array_layers > 0);
   assert(depth > 0);
   assert(image->vk.mip_levels >= 1);

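   /* Slices are computed from the smallest mip level up to level 0, matching
    * the HW layout where the smallest levels sit at the lowest offsets and
    * level 0 is placed last in memory.
    */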
   uint32_t offset = 0;
   for (int32_t i = image->vk.mip_levels - 1; i >= 0; i--) {
      struct v3d_resource_slice *slice = &image->slices[i];

      uint32_t level_width, level_height, level_depth;
      if (i < 2) {
         level_width = u_minify(width, i);
         level_height = u_minify(height, i);
      } else {
         level_width = u_minify(pot_width, i);
         level_height = u_minify(pot_height, i);
      }

      if (i < 1)
         level_depth = u_minify(depth, i);
      else
         level_depth = u_minify(pot_depth, i);

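      /* 4x MSAA is stored as a 2x2 grid of samples per pixel, hence the
       * doubling of both level dimensions.
       */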
      if (msaa) {
         level_width *= 2;
         level_height *= 2;
      }

      level_width = DIV_ROUND_UP(level_width, block_width);
      level_height = DIV_ROUND_UP(level_height, block_height);

      if (!image->tiled) {
         slice->tiling = V3D_TILING_RASTER;
         if (image->vk.image_type == VK_IMAGE_TYPE_1D)
            level_width = align(level_width, 64 / image->cpp);
      } else {
         if ((i != 0 || !uif_top) &&
             (level_width <= utile_w || level_height <= utile_h)) {
            slice->tiling = V3D_TILING_LINEARTILE;
            level_width = align(level_width, utile_w);
            level_height = align(level_height, utile_h);
         } else if ((i != 0 || !uif_top) && level_width <= uif_block_w) {
            slice->tiling = V3D_TILING_UBLINEAR_1_COLUMN;
            level_width = align(level_width, uif_block_w);
            level_height = align(level_height, uif_block_h);
         } else if ((i != 0 || !uif_top) && level_width <= 2 * uif_block_w) {
            slice->tiling = V3D_TILING_UBLINEAR_2_COLUMN;
            level_width = align(level_width, 2 * uif_block_w);
            level_height = align(level_height, uif_block_h);
         } else {
            /* We align the width to a 4-block column of UIF blocks, but we
             * only align height to UIF blocks.
             */
            level_width = align(level_width, 4 * uif_block_w);
            level_height = align(level_height, uif_block_h);

            slice->ub_pad = v3d_get_ub_pad(image->cpp, level_height);
            level_height += slice->ub_pad * uif_block_h;

            /* If the padding set us up to be aligned to the page cache size,
             * then the HW will use the XOR bit on odd columns to get us
             * perfectly misaligned.
             */
            if ((level_height / uif_block_h) %
                (V3D_PAGE_CACHE_SIZE / V3D_UIFBLOCK_ROW_SIZE) == 0) {
               slice->tiling = V3D_TILING_UIF_XOR;
            } else {
               slice->tiling = V3D_TILING_UIF_NO_XOR;
            }
         }
      }

      slice->offset = offset;
      slice->stride = level_width * image->cpp;
      slice->padded_height = level_height;
      if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
          slice->tiling == V3D_TILING_UIF_XOR) {
         slice->padded_height_of_output_image_in_uif_blocks =
            slice->padded_height / (2 * v3d_utile_height(image->cpp));
      }

      slice->size = level_height * slice->stride;
      uint32_t slice_total_size = slice->size * level_depth;

      /* The HW aligns level 1's base to a page if any of level 1 or
       * below could be UIF XOR. The lower levels then inherit the
       * alignment for as long as necessary, thanks to being power of
       * two aligned.
       */
      if (i == 1 &&
          level_width > 4 * uif_block_w &&
          level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
         slice_total_size = align(slice_total_size, V3D_UIFCFG_PAGE_SIZE);
      }

      offset += slice_total_size;
   }

   image->size = offset;

   /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
    * needs to be aligned to utile boundaries. Since tiles are laid out
    * from small to big in memory, we need to align the later UIF slices
    * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
    * slices.
    *
    * We additionally align to 4k, which improves UIF XOR performance.
    */
   image->alignment = image->tiled ? 4096 : image->cpp;
   uint32_t align_offset =
      align(image->slices[0].offset, image->alignment) - image->slices[0].offset;
   if (align_offset) {
      image->size += align_offset;
      for (int i = 0; i < image->vk.mip_levels; i++)
         image->slices[i].offset += align_offset;
   }

   /* Arrays and cube textures have a stride which is the distance from
    * one full mipmap tree to the next (64-byte aligned). For 3D textures,
    * we need to program the stride between slices of miplevel 0.
    */
   if (image->vk.image_type != VK_IMAGE_TYPE_3D) {
      image->cube_map_stride =
         align(image->slices[0].offset + image->slices[0].size, 64);
      image->size += image->cube_map_stride * (image->vk.array_layers - 1);
   } else {
      image->cube_map_stride = image->slices[0].size;
   }
}

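/* Returns the offset of the given layer in the image at the given mip level:
 * for 3D images "layer" selects a depth slice within the level, while for
 * arrays and cube maps it selects a full mipmap tree.
 */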
uint32_t
v3dv_layer_offset(const struct v3dv_image *image, uint32_t level, uint32_t layer)
{
   const struct v3d_resource_slice *slice = &image->slices[level];

   if (image->vk.image_type == VK_IMAGE_TYPE_3D)
      return image->mem_offset + slice->offset + layer * slice->size;
   else
      return image->mem_offset + slice->offset + layer * image->cube_map_stride;
}

static VkResult
create_image(struct v3dv_device *device,
             const VkImageCreateInfo *pCreateInfo,
             const VkAllocationCallbacks *pAllocator,
             VkImage *pImage)
{
   struct v3dv_image *image = NULL;

   image = vk_image_create(&device->vk, pCreateInfo, pAllocator, sizeof(*image));
   if (image == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* When using the simulator, the WSI common code will see that our
    * driver's wsi device doesn't match the display device, and because of
    * that it will not attempt to present directly from the swapchain images.
    * Instead, it will use the prime blit path (the use_buffer_blit flag in
    * struct wsi_swapchain), where it copies the contents of the swapchain
    * images to a linear buffer with an appropriate row stride for
    * presentation. As a result, on that path, swapchain images do not have
    * any special requirements and are not created with the pNext structs
    * below.
    */
   VkImageTiling tiling = pCreateInfo->tiling;
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
   if (tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
      const VkImageDrmFormatModifierListCreateInfoEXT *mod_info =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
      const VkImageDrmFormatModifierExplicitCreateInfoEXT *explicit_mod_info =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
      assert(mod_info || explicit_mod_info);

      if (mod_info) {
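         /* When given a list of allowed modifiers, prefer UIF over LINEAR:
          * LINEAR is only selected if no tiled modifier is available.
          */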
         for (uint32_t i = 0; i < mod_info->drmFormatModifierCount; i++) {
            switch (mod_info->pDrmFormatModifiers[i]) {
            case DRM_FORMAT_MOD_LINEAR:
               if (modifier == DRM_FORMAT_MOD_INVALID)
                  modifier = DRM_FORMAT_MOD_LINEAR;
               break;
            case DRM_FORMAT_MOD_BROADCOM_UIF:
               modifier = DRM_FORMAT_MOD_BROADCOM_UIF;
               break;
            }
         }
      } else {
         modifier = explicit_mod_info->drmFormatModifier;
      }
      assert(modifier == DRM_FORMAT_MOD_LINEAR ||
             modifier == DRM_FORMAT_MOD_BROADCOM_UIF);
   } else if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
              image->vk.wsi_legacy_scanout) {
      tiling = VK_IMAGE_TILING_LINEAR;
   }

#ifdef ANDROID
   const VkNativeBufferANDROID *native_buffer =
      vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);

   int native_buf_fd = -1;
   int native_buf_stride = 0;
   int native_buf_size = 0;

   if (native_buffer != NULL) {
      VkResult result = v3dv_gralloc_info(device, native_buffer, &native_buf_fd,
                                          &native_buf_stride, &native_buf_size,
                                          &modifier);
      if (result != VK_SUCCESS) {
         vk_image_destroy(&device->vk, pAllocator, &image->vk);
         return result;
      }

      if (modifier != DRM_FORMAT_MOD_BROADCOM_UIF)
         tiling = VK_IMAGE_TILING_LINEAR;
   }
#endif

   const struct v3dv_format *format =
      v3dv_X(device, get_format)(pCreateInfo->format);
   v3dv_assert(format != NULL && format->supported);

   assert(pCreateInfo->samples == VK_SAMPLE_COUNT_1_BIT ||
          pCreateInfo->samples == VK_SAMPLE_COUNT_4_BIT);

   image->format = format;
   image->cpp = vk_format_get_blocksize(image->vk.format);
   image->tiled = tiling == VK_IMAGE_TILING_OPTIMAL ||
                  (tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT &&
                   modifier != DRM_FORMAT_MOD_LINEAR);

   image->vk.tiling = tiling;
   image->vk.drm_format_mod = modifier;

   /* Our meta paths can create image views with compatible formats for any
    * image, so always set this flag to keep the common Vulkan image code
    * happy.
    */
   image->vk.create_flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;

   v3d_setup_slices(image);

#ifdef ANDROID
   if (native_buffer != NULL) {
      image->slices[0].stride = native_buf_stride;
      image->slices[0].size = image->size = native_buf_size;

      VkResult result = v3dv_import_native_buffer_fd(v3dv_device_to_handle(device),
                                                     native_buf_fd, pAllocator,
                                                     v3dv_image_to_handle(image));
      if (result != VK_SUCCESS) {
         vk_object_free(&device->vk, pAllocator, image);
         return result;
      }
   }
#endif

   *pImage = v3dv_image_to_handle(image);

   return VK_SUCCESS;
}

static VkResult
create_image_from_swapchain(struct v3dv_device *device,
                            const VkImageCreateInfo *pCreateInfo,
                            const VkImageSwapchainCreateInfoKHR *swapchain_info,
                            const VkAllocationCallbacks *pAllocator,
                            VkImage *pImage)
{
   struct v3dv_image *swapchain_image =
      v3dv_wsi_get_image_from_swapchain(swapchain_info->swapchain, 0);
   assert(swapchain_image);

   VkImageCreateInfo local_create_info = *pCreateInfo;
   local_create_info.pNext = NULL;

   /* Added by wsi code. */
   local_create_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   /* The spec requires TILING_OPTIMAL as input, but the swapchain image may
    * privately use a different tiling. See spec anchor
    * #swapchain-wsi-image-create-info .
    */
   assert(local_create_info.tiling == VK_IMAGE_TILING_OPTIMAL);
   local_create_info.tiling = swapchain_image->vk.tiling;

   VkImageDrmFormatModifierListCreateInfoEXT local_modifier_info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
      .drmFormatModifierCount = 1,
      .pDrmFormatModifiers = &swapchain_image->vk.drm_format_mod,
   };

   if (swapchain_image->vk.drm_format_mod != DRM_FORMAT_MOD_INVALID)
      __vk_append_struct(&local_create_info, &local_modifier_info);

   assert(swapchain_image->vk.image_type == local_create_info.imageType);
   assert(swapchain_image->vk.format == local_create_info.format);
   assert(swapchain_image->vk.extent.width == local_create_info.extent.width);
   assert(swapchain_image->vk.extent.height == local_create_info.extent.height);
   assert(swapchain_image->vk.extent.depth == local_create_info.extent.depth);
   assert(swapchain_image->vk.array_layers == local_create_info.arrayLayers);
   assert(swapchain_image->vk.samples == local_create_info.samples);
   assert(swapchain_image->vk.tiling == local_create_info.tiling);
   assert((swapchain_image->vk.usage & local_create_info.usage) ==
          local_create_info.usage);

   return create_image(device, &local_create_info, pAllocator, pImage);
}

VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateImage(VkDevice _device,
                 const VkImageCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkImage *pImage)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);

   const VkImageSwapchainCreateInfoKHR *swapchain_info =
      vk_find_struct_const(pCreateInfo->pNext, IMAGE_SWAPCHAIN_CREATE_INFO_KHR);
   if (swapchain_info && swapchain_info->swapchain != VK_NULL_HANDLE)
      return create_image_from_swapchain(device, pCreateInfo, swapchain_info,
                                         pAllocator, pImage);

   return create_image(device, pCreateInfo, pAllocator, pImage);
}

VKAPI_ATTR void VKAPI_CALL
v3dv_GetImageSubresourceLayout(VkDevice device,
                               VkImage _image,
                               const VkImageSubresource *subresource,
                               VkSubresourceLayout *layout)
{
   V3DV_FROM_HANDLE(v3dv_image, image, _image);

   const struct v3d_resource_slice *slice =
      &image->slices[subresource->mipLevel];
   layout->offset =
      v3dv_layer_offset(image, subresource->mipLevel, subresource->arrayLayer) -
      image->mem_offset;
   layout->rowPitch = slice->stride;
   layout->depthPitch = image->cube_map_stride;
   layout->arrayPitch = image->cube_map_stride;

   if (image->vk.image_type != VK_IMAGE_TYPE_3D) {
      layout->size = slice->size;
   } else {
      /* For 3D images, the size of the slice represents the size of a 2D slice
       * in the 3D image, so we have to multiply by the depth extent of the
       * miplevel. For levels other than the first, we just compute the size
       * as the distance between consecutive levels (notice that mip levels are
       * arranged in memory from last to first).
       */
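      /* Since slices[level - 1] is placed after slices[level] in memory,
       * prev_slice->offset - slice->offset covers the full depth of this
       * level, including any inter-level padding.
       */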
      if (subresource->mipLevel == 0) {
         layout->size = slice->size * image->vk.extent.depth;
      } else {
         const struct v3d_resource_slice *prev_slice =
            &image->slices[subresource->mipLevel - 1];
         layout->size = prev_slice->offset - slice->offset;
      }
   }
}

VKAPI_ATTR void VKAPI_CALL
v3dv_DestroyImage(VkDevice _device,
                  VkImage _image,
                  const VkAllocationCallbacks* pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image, image, _image);

   if (image == NULL)
      return;

#ifdef ANDROID
   if (image->is_native_buffer_memory)
      v3dv_FreeMemory(_device, v3dv_device_memory_to_handle(image->mem), pAllocator);
#endif

   vk_image_destroy(&device->vk, pAllocator, &image->vk);
}

VkImageViewType
v3dv_image_type_to_view_type(VkImageType type)
{
   switch (type) {
   case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   default:
      unreachable("Invalid image type");
   }
}

static VkResult
create_image_view(struct v3dv_device *device,
                  bool driver_internal,
                  const VkImageViewCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkImageView *pView)
{
   V3DV_FROM_HANDLE(v3dv_image, image, pCreateInfo->image);
   struct v3dv_image_view *iview;

   iview = vk_image_view_create(&device->vk, driver_internal, pCreateInfo,
                                pAllocator, sizeof(*iview));
   if (iview == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;

   iview->offset = v3dv_layer_offset(image, iview->vk.base_mip_level,
                                     iview->vk.base_array_layer);

   /* If we have D24S8 format but the view only selects the stencil aspect
    * we want to re-interpret the format as RGBA8_UINT, then map our stencil
    * data reads to the R component and ignore the GBA channels that contain
    * the depth aspect data.
    */
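   /* With this swizzle, reads from the RGBA8_UINT view return the stencil
    * byte in R, a constant 0 in G/B and a constant 1 in A.
    */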
   VkFormat format;
   uint8_t image_view_swizzle[4];
   if (pCreateInfo->format == VK_FORMAT_D24_UNORM_S8_UINT &&
       range->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
      format = VK_FORMAT_R8G8B8A8_UINT;
      image_view_swizzle[0] = PIPE_SWIZZLE_X;
      image_view_swizzle[1] = PIPE_SWIZZLE_0;
      image_view_swizzle[2] = PIPE_SWIZZLE_0;
      image_view_swizzle[3] = PIPE_SWIZZLE_1;
   } else {
      format = pCreateInfo->format;

      /* FIXME: we are doing this vk to pipe swizzle mapping just to call
       * util_format_compose_swizzles. It would be good to check whether it
       * would be better to reimplement the latter using vk component
       * mappings directly.
       */
      vk_component_mapping_to_pipe_swizzle(iview->vk.swizzle,
                                           image_view_swizzle);
   }

   iview->vk.view_format = format;
   iview->format = v3dv_X(device, get_format)(format);
   assert(iview->format && iview->format->supported);

   if (vk_format_is_depth_or_stencil(iview->vk.view_format)) {
      iview->internal_type =
         v3dv_X(device, get_internal_depth_type)(iview->vk.view_format);
   } else {
      v3dv_X(device, get_internal_type_bpp_for_output_format)
         (iview->format->rt_type, &iview->internal_type, &iview->internal_bpp);
   }

   const uint8_t *format_swizzle = v3dv_get_format_swizzle(device, format);
   util_format_compose_swizzles(format_swizzle, image_view_swizzle,
                                iview->swizzle);

   iview->swap_rb = v3dv_format_swizzle_needs_rb_swap(iview->swizzle);
   iview->channel_reverse = v3dv_format_swizzle_needs_reverse(iview->swizzle);

   v3dv_X(device, pack_texture_shader_state)(device, iview);

   *pView = v3dv_image_view_to_handle(iview);

   return VK_SUCCESS;
}

VkResult
v3dv_create_image_view(struct v3dv_device *device,
                       const VkImageViewCreateInfo *pCreateInfo,
                       VkImageView *pView)
{
   return create_image_view(device, true, pCreateInfo, NULL, pView);
}

VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateImageView(VkDevice _device,
                     const VkImageViewCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkImageView *pView)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);

   return create_image_view(device, false, pCreateInfo, pAllocator, pView);
}

VKAPI_ATTR void VKAPI_CALL
v3dv_DestroyImageView(VkDevice _device,
                      VkImageView imageView,
                      const VkAllocationCallbacks* pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image_view, image_view, imageView);

   if (image_view == NULL)
      return;

   vk_image_view_destroy(&device->vk, pAllocator, &image_view->vk);
}

VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateBufferView(VkDevice _device,
                      const VkBufferViewCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkBufferView *pView)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);

   struct v3dv_buffer *buffer =
      v3dv_buffer_from_handle(pCreateInfo->buffer);

   struct v3dv_buffer_view *view =
      vk_object_zalloc(&device->vk, pAllocator, sizeof(*view),
                       VK_OBJECT_TYPE_BUFFER_VIEW);
   if (!view)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   uint32_t range;
   if (pCreateInfo->range == VK_WHOLE_SIZE)
      range = buffer->size - pCreateInfo->offset;
   else
      range = pCreateInfo->range;

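   /* The element count is the view's range divided by the texel size; as an
    * illustrative example, a 64-byte range of VK_FORMAT_R32G32B32A32_SFLOAT
    * (16 bytes per texel) gives 4 elements.
    */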
   enum pipe_format pipe_format = vk_format_to_pipe_format(pCreateInfo->format);
   uint32_t num_elements = range / util_format_get_blocksize(pipe_format);

   view->buffer = buffer;
   view->offset = pCreateInfo->offset;
   view->size = view->offset + range;
   view->num_elements = num_elements;
   view->vk_format = pCreateInfo->format;
   view->format = v3dv_X(device, get_format)(view->vk_format);

   v3dv_X(device, get_internal_type_bpp_for_output_format)
      (view->format->rt_type, &view->internal_type, &view->internal_bpp);

   if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT ||
       buffer->usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)
      v3dv_X(device, pack_texture_shader_state_from_buffer_view)(device, view);

   *pView = v3dv_buffer_view_to_handle(view);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
v3dv_DestroyBufferView(VkDevice _device,
                       VkBufferView bufferView,
                       const VkAllocationCallbacks *pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_buffer_view, buffer_view, bufferView);

   if (buffer_view == NULL)
      return;

   vk_object_free(&device->vk, pAllocator, buffer_view);
}