/*
 * Copyright © 2019 Raspberry Pi
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3dv_private.h"

#include "broadcom/cle/v3dx_pack.h"
#include "drm-uapi/drm_fourcc.h"
#include "util/format/u_format.h"
#include "util/u_math.h"
#include "vk_format_info.h"
#include "vk_util.h"
#include "vulkan/wsi/wsi_common.h"

/**
 * Computes the HW's UIFblock padding for a given height/cpp.
 *
 * The goal of the padding is to keep pages of the same color (bank number) at
 * least half a page away from each other vertically when crossing between
 * columns of UIF blocks.
 */
static uint32_t
v3d_get_ub_pad(uint32_t cpp, uint32_t height)
{
   uint32_t utile_h = v3d_utile_height(cpp);
   uint32_t uif_block_h = utile_h * 2;
   uint32_t height_ub = height / uif_block_h;

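   /* The bank pattern repeats every PAGE_CACHE_UB_ROWS rows of UIF blocks;
    * this is where the level's bottom edge lands within that repeating
    * pattern.
    */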
   uint32_t height_offset_in_pc = height_ub % PAGE_CACHE_UB_ROWS;

   /* For the perfectly-aligned-for-UIF-XOR case, don't add any pad. */
   if (height_offset_in_pc == 0)
      return 0;

   /* Try padding up to where we're offset by at least half a page. */
   if (height_offset_in_pc < PAGE_UB_ROWS_TIMES_1_5) {
      /* If we fit entirely in the page cache, don't pad. */
      if (height_ub < PAGE_CACHE_UB_ROWS)
         return 0;
      else
         return PAGE_UB_ROWS_TIMES_1_5 - height_offset_in_pc;
   }

   /* If we're close to being aligned to page cache size, then round up
    * and rely on XOR.
    */
   if (height_offset_in_pc > PAGE_CACHE_MINUS_1_5_UB_ROWS)
      return PAGE_CACHE_UB_ROWS - height_offset_in_pc;

   /* Otherwise, we're far enough away (top and bottom) to not need any
    * padding.
    */
   return 0;
}

static void
v3d_setup_slices(struct v3dv_image *image)
{
   assert(image->cpp > 0);

   uint32_t width = image->extent.width;
   uint32_t height = image->extent.height;
   uint32_t depth = image->extent.depth;

   /* Note that power-of-two padding is based on level 1. These are not
    * equivalent to just util_next_power_of_two(dimension), because at a
    * level 0 dimension of 9, the level 1 power-of-two padded value is 4,
    * not 8.
    */
   uint32_t pot_width = 2 * util_next_power_of_two(u_minify(width, 1));
   uint32_t pot_height = 2 * util_next_power_of_two(u_minify(height, 1));
   uint32_t pot_depth = 2 * util_next_power_of_two(u_minify(depth, 1));
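   /* For example, with width == 9: u_minify(9, 1) == 4 and
    * util_next_power_of_two(4) == 4, so pot_width == 8 and
    * u_minify(pot_width, 1) == 4 at level 1, as the comment above requires.
    */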

   uint32_t utile_w = v3d_utile_width(image->cpp);
   uint32_t utile_h = v3d_utile_height(image->cpp);
   uint32_t uif_block_w = utile_w * 2;
   uint32_t uif_block_h = utile_h * 2;
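   /* Note: a UIF block is a 2x2 grid of utiles, which is why the block
    * dimensions above are twice the utile dimensions.
    */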

   uint32_t block_width = vk_format_get_blockwidth(image->vk_format);
   uint32_t block_height = vk_format_get_blockheight(image->vk_format);

   assert(image->samples == VK_SAMPLE_COUNT_1_BIT ||
          image->samples == VK_SAMPLE_COUNT_4_BIT);
   bool msaa = image->samples != VK_SAMPLE_COUNT_1_BIT;

   bool uif_top = msaa;

   assert(image->array_size > 0);
   assert(depth > 0);
   assert(image->levels >= 1);

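   /* Mip levels are laid out in memory from smallest (the last level) to
    * largest (level 0), so we accumulate slice offsets walking the levels
    * in reverse.
    */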
   uint32_t offset = 0;
   for (int32_t i = image->levels - 1; i >= 0; i--) {
      struct v3d_resource_slice *slice = &image->slices[i];

      uint32_t level_width, level_height, level_depth;
      if (i < 2) {
         level_width = u_minify(width, i);
         level_height = u_minify(height, i);
      } else {
         level_width = u_minify(pot_width, i);
         level_height = u_minify(pot_height, i);
      }

      if (i < 1)
         level_depth = u_minify(depth, i);
      else
         level_depth = u_minify(pot_depth, i);

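      /* 4x MSAA surfaces are stored at twice the size in each dimension
       * (presumably each pixel's samples laid out as a 2x2 quad).
       */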
      if (msaa) {
         level_width *= 2;
         level_height *= 2;
      }

      level_width = DIV_ROUND_UP(level_width, block_width);
      level_height = DIV_ROUND_UP(level_height, block_height);

      if (!image->tiled) {
         slice->tiling = VC5_TILING_RASTER;
         if (image->type == VK_IMAGE_TYPE_1D)
            level_width = align(level_width, 64 / image->cpp);
      } else {
         if ((i != 0 || !uif_top) &&
             (level_width <= utile_w || level_height <= utile_h)) {
            slice->tiling = VC5_TILING_LINEARTILE;
            level_width = align(level_width, utile_w);
            level_height = align(level_height, utile_h);
         } else if ((i != 0 || !uif_top) && level_width <= uif_block_w) {
            slice->tiling = VC5_TILING_UBLINEAR_1_COLUMN;
            level_width = align(level_width, uif_block_w);
            level_height = align(level_height, uif_block_h);
         } else if ((i != 0 || !uif_top) && level_width <= 2 * uif_block_w) {
            slice->tiling = VC5_TILING_UBLINEAR_2_COLUMN;
            level_width = align(level_width, 2 * uif_block_w);
            level_height = align(level_height, uif_block_h);
         } else {
            /* We align the width to a 4-block column of UIF blocks, but we
             * only align height to UIF blocks.
             */
            level_width = align(level_width, 4 * uif_block_w);
            level_height = align(level_height, uif_block_h);

            slice->ub_pad = v3d_get_ub_pad(image->cpp, level_height);
            level_height += slice->ub_pad * uif_block_h;

            /* If the padding set us to be aligned to the page cache size,
             * then the HW will use the XOR bit on odd columns to get us
             * perfectly misaligned.
             */
            if ((level_height / uif_block_h) %
                (VC5_PAGE_CACHE_SIZE / VC5_UIFBLOCK_ROW_SIZE) == 0) {
               slice->tiling = VC5_TILING_UIF_XOR;
            } else {
               slice->tiling = VC5_TILING_UIF_NO_XOR;
            }
         }
      }

      slice->offset = offset;
      slice->stride = level_width * image->cpp;
      slice->padded_height = level_height;
      if (slice->tiling == VC5_TILING_UIF_NO_XOR ||
          slice->tiling == VC5_TILING_UIF_XOR) {
         slice->padded_height_of_output_image_in_uif_blocks =
            slice->padded_height / (2 * v3d_utile_height(image->cpp));
      }

      slice->size = level_height * slice->stride;
      uint32_t slice_total_size = slice->size * level_depth;

      /* The HW aligns level 1's base to a page if any of level 1 or
       * below could be UIF XOR. The lower levels then inherit the
       * alignment for as long as necessary, thanks to being power of
       * two aligned.
       */
      if (i == 1 &&
          level_width > 4 * uif_block_w &&
          level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
         slice_total_size = align(slice_total_size, VC5_UIFCFG_PAGE_SIZE);
      }

      offset += slice_total_size;
   }

   image->size = offset;

   /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
    * needs to be aligned to utile boundaries. Since tiles are laid out
    * from small to big in memory, we need to align the later UIF slices
    * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
    * slices.
    *
    * We additionally align to 4k, which improves UIF XOR performance.
    */
   image->alignment =
      image->tiling == VK_IMAGE_TILING_LINEAR ? image->cpp : 4096;
   uint32_t align_offset =
      align(image->slices[0].offset, image->alignment) - image->slices[0].offset;
   if (align_offset) {
      image->size += align_offset;
      for (int i = 0; i < image->levels; i++)
         image->slices[i].offset += align_offset;
   }

   /* Arrays and cube textures have a stride which is the distance from
    * one full mipmap tree to the next (64b aligned). For 3D textures,
    * we need to program the stride between slices of miplevel 0.
    */
   if (image->type != VK_IMAGE_TYPE_3D) {
      image->cube_map_stride =
         align(image->slices[0].offset + image->slices[0].size, 64);
      image->size += image->cube_map_stride * (image->array_size - 1);
   } else {
      image->cube_map_stride = image->slices[0].size;
   }
}

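/* Returns the offset into the image's bound memory of a given layer within a
 * given mip level. For 3D images a "layer" is a depth slice within the
 * level; for arrays and cube maps it is a full mip tree, cube_map_stride
 * apart.
 */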
uint32_t
v3dv_layer_offset(const struct v3dv_image *image, uint32_t level, uint32_t layer)
{
   const struct v3d_resource_slice *slice = &image->slices[level];

   if (image->type == VK_IMAGE_TYPE_3D)
      return image->mem_offset + slice->offset + layer * slice->size;
   else
      return image->mem_offset + slice->offset + layer * image->cube_map_stride;
}

VkResult
v3dv_CreateImage(VkDevice _device,
                 const VkImageCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkImage *pImage)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   struct v3dv_image *image = NULL;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);

   v3dv_assert(pCreateInfo->mipLevels > 0);
   v3dv_assert(pCreateInfo->arrayLayers > 0);
   v3dv_assert(pCreateInfo->samples > 0);
   v3dv_assert(pCreateInfo->extent.width > 0);
   v3dv_assert(pCreateInfo->extent.height > 0);
   v3dv_assert(pCreateInfo->extent.depth > 0);

   /* When using the simulator, the WSI common code will see that our
    * driver's WSI device doesn't match the display device, so it will not
    * attempt to present directly from the swapchain images. Instead it uses
    * the prime blit path (the use_prime_blit flag in struct wsi_swapchain),
    * copying the contents of the swapchain images to a linear buffer with
    * an appropriate row stride for presentation. As a result, on that path
    * swapchain images do not have any special requirements and are not
    * created with the pNext structs below.
    */
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
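   /* Prefer UIF over LINEAR when the app's modifier list offers both:
    * LINEAR is only selected below if UIF is not in the list.
    */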
   if (pCreateInfo->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
      const VkImageDrmFormatModifierListCreateInfoEXT *mod_info =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
      assert(mod_info);
      for (uint32_t i = 0; i < mod_info->drmFormatModifierCount; i++) {
         switch (mod_info->pDrmFormatModifiers[i]) {
         case DRM_FORMAT_MOD_LINEAR:
            if (modifier == DRM_FORMAT_MOD_INVALID)
               modifier = DRM_FORMAT_MOD_LINEAR;
            break;
         case DRM_FORMAT_MOD_BROADCOM_UIF:
            modifier = DRM_FORMAT_MOD_BROADCOM_UIF;
            break;
         }
      }
   } else {
      const struct wsi_image_create_info *wsi_info =
         vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
      if (wsi_info)
         modifier = DRM_FORMAT_MOD_LINEAR;
   }

   /* 1D and 1D_ARRAY textures are always raster-order */
   VkImageTiling tiling;
   if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D)
      tiling = VK_IMAGE_TILING_LINEAR;
   else if (modifier == DRM_FORMAT_MOD_INVALID)
      tiling = pCreateInfo->tiling;
   else if (modifier == DRM_FORMAT_MOD_BROADCOM_UIF)
      tiling = VK_IMAGE_TILING_OPTIMAL;
   else
      tiling = VK_IMAGE_TILING_LINEAR;

   const struct v3dv_format *format = v3dv_get_format(pCreateInfo->format);
   v3dv_assert(format != NULL && format->supported);

   image = vk_zalloc2(&device->alloc, pAllocator, sizeof(*image), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!image)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   assert(pCreateInfo->samples == VK_SAMPLE_COUNT_1_BIT ||
          pCreateInfo->samples == VK_SAMPLE_COUNT_4_BIT);

   image->type = pCreateInfo->imageType;
   image->extent = pCreateInfo->extent;
   image->vk_format = pCreateInfo->format;
   image->format = format;
   image->aspects = vk_format_aspects(image->vk_format);
   image->levels = pCreateInfo->mipLevels;
   image->array_size = pCreateInfo->arrayLayers;
   image->samples = pCreateInfo->samples;
   image->usage = pCreateInfo->usage;
   image->flags = pCreateInfo->flags;

   image->drm_format_mod = modifier;
   image->tiling = tiling;
   image->tiled = tiling == VK_IMAGE_TILING_OPTIMAL;

   image->cpp = vk_format_get_blocksize(image->vk_format);

   v3d_setup_slices(image);

   *pImage = v3dv_image_to_handle(image);

   return VK_SUCCESS;
}

void
v3dv_GetImageSubresourceLayout(VkDevice device,
                               VkImage _image,
                               const VkImageSubresource *subresource,
                               VkSubresourceLayout *layout)
{
   V3DV_FROM_HANDLE(v3dv_image, image, _image);

   const struct v3d_resource_slice *slice =
      &image->slices[subresource->mipLevel];
   layout->offset =
      v3dv_layer_offset(image, subresource->mipLevel, subresource->arrayLayer);
   layout->rowPitch = slice->stride;
   layout->depthPitch = image->cube_map_stride;
   layout->arrayPitch = image->cube_map_stride;

   if (image->type != VK_IMAGE_TYPE_3D) {
      layout->size = slice->size;
   } else {
      /* For 3D images, the size of the slice represents the size of a 2D slice
       * in the 3D image, so we have to multiply by the depth extent of the
       * miplevel. For levels other than the first, we just compute the size
       * as the distance between consecutive levels (notice that mip levels are
       * arranged in memory from last to first).
       */
      if (subresource->mipLevel == 0) {
         layout->size = slice->size * image->extent.depth;
      } else {
         const struct v3d_resource_slice *prev_slice =
            &image->slices[subresource->mipLevel - 1];
         layout->size = prev_slice->offset - slice->offset;
      }
   }
}

VkResult
v3dv_GetImageDrmFormatModifierPropertiesEXT(
   VkDevice device,
   VkImage _image,
   VkImageDrmFormatModifierPropertiesEXT *pProperties)
{
   V3DV_FROM_HANDLE(v3dv_image, image, _image);

   assert(pProperties->sType ==
          VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT);

   pProperties->drmFormatModifier = image->drm_format_mod;

   return VK_SUCCESS;
}

void
v3dv_DestroyImage(VkDevice _device,
                  VkImage _image,
                  const VkAllocationCallbacks* pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image, image, _image);
   vk_free2(&device->alloc, pAllocator, image);
}

VkImageViewType
v3dv_image_type_to_view_type(VkImageType type)
{
   switch (type) {
   case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   default:
      unreachable("Invalid image type");
   }
}

/*
 * Translates a pipe_swizzle to the swizzle values used by the
 * TEXTURE_SHADER_STATE packet.
 *
 * FIXME: copy-and-paste from v3d; move to a common place?
 */
static uint32_t
translate_swizzle(unsigned char pipe_swizzle)
{
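   /* The packet encodes the source as: 0, 1, then R/G/B/A at values 2..5.
    * PIPE_SWIZZLE_X..W are 0..3, hence the "2 +" below.
    */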
   switch (pipe_swizzle) {
   case PIPE_SWIZZLE_0:
      return 0;
   case PIPE_SWIZZLE_1:
      return 1;
   case PIPE_SWIZZLE_X:
   case PIPE_SWIZZLE_Y:
   case PIPE_SWIZZLE_Z:
   case PIPE_SWIZZLE_W:
      return 2 + pipe_swizzle;
   default:
      unreachable("unknown swizzle");
   }
}

/*
 * Packs the texture shader state and ensures a BO for it (the latter may be
 * temporary).
 */
static void
pack_texture_shader_state_helper(struct v3dv_device *device,
                                 struct v3dv_image_view *image_view,
                                 bool for_cube_map_array_storage)
{
   assert(!for_cube_map_array_storage ||
          image_view->type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
   const uint32_t index = for_cube_map_array_storage ? 1 : 0;
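   /* texture_shader_state[0] is the variant used for sampling; [1] is the
    * variant for image load/store on cube map arrays, which wants a
    * different image_depth (see below).
    */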

   assert(image_view->image);
   const struct v3dv_image *image = image_view->image;

   assert(image->samples == VK_SAMPLE_COUNT_1_BIT ||
          image->samples == VK_SAMPLE_COUNT_4_BIT);
   const uint32_t msaa_scale = image->samples == VK_SAMPLE_COUNT_1_BIT ? 1 : 2;

   v3dv_pack(image_view->texture_shader_state[index], TEXTURE_SHADER_STATE, tex) {

      tex.level_0_is_strictly_uif =
         (image->slices[0].tiling == VC5_TILING_UIF_XOR ||
          image->slices[0].tiling == VC5_TILING_UIF_NO_XOR);

      tex.level_0_xor_enable = (image->slices[0].tiling == VC5_TILING_UIF_XOR);

      if (tex.level_0_is_strictly_uif)
         tex.level_0_ub_pad = image->slices[0].ub_pad;

      /* FIXME: v3d never sets uif_xor_disable, but reads it in the check
       * below, so set the default value explicitly.
       */
      tex.uif_xor_disable = false;
      if (tex.uif_xor_disable ||
          tex.level_0_is_strictly_uif) {
         tex.extended = true;
      }

      tex.base_level = image_view->base_level;
      tex.max_level = image_view->max_level;

      tex.swizzle_r = translate_swizzle(image_view->swizzle[0]);
      tex.swizzle_g = translate_swizzle(image_view->swizzle[1]);
      tex.swizzle_b = translate_swizzle(image_view->swizzle[2]);
      tex.swizzle_a = translate_swizzle(image_view->swizzle[3]);

      tex.texture_type = image_view->format->tex_type;

      if (image->type == VK_IMAGE_TYPE_3D) {
         tex.image_depth = image->extent.depth;
      } else {
         tex.image_depth = (image_view->last_layer - image_view->first_layer) + 1;
      }

      /* Empirical testing with CTS shows that when we are sampling from cube
       * arrays we want to set image depth to layers / 6, but not when doing
       * image load/store.
       */
      if (image_view->type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY &&
          !for_cube_map_array_storage) {
         assert(tex.image_depth % 6 == 0);
         tex.image_depth /= 6;
      }

      tex.image_height = image->extent.height * msaa_scale;
      tex.image_width = image->extent.width * msaa_scale;

      /* On 4.x, the height of a 1D texture is redefined to be the
       * upper 14 bits of the width (which is only usable with txf).
       */
      if (image->type == VK_IMAGE_TYPE_1D) {
         tex.image_height = tex.image_width >> 14;
      }
      tex.image_width &= (1 << 14) - 1;
      tex.image_height &= (1 << 14) - 1;

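      /* cube_map_stride is the per-layer stride; the packet field takes it
       * in 64-byte units.
       */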
      tex.array_stride_64_byte_aligned = image->cube_map_stride / 64;

      tex.srgb = vk_format_is_srgb(image_view->vk_format);

      /* At this point we don't have the job, which is why the first
       * parameter is NULL: it avoids a crash when cl_pack_emit_reloc tries
       * to add the BO to the job. It also means that we need to manually
       * add the image BO to any job that uses this texture.
       */
      const uint32_t base_offset =
         image->mem->bo->offset +
         v3dv_layer_offset(image, 0, image_view->first_layer);
      tex.texture_base_pointer = v3dv_cl_address(NULL, base_offset);
   }
}

static void
pack_texture_shader_state(struct v3dv_device *device,
                          struct v3dv_image_view *iview)
{
   pack_texture_shader_state_helper(device, iview, false);
   if (iview->type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
      pack_texture_shader_state_helper(device, iview, true);
}

static enum pipe_swizzle
vk_component_mapping_to_pipe_swizzle(VkComponentSwizzle comp,
                                     VkComponentSwizzle swz)
{
   if (swz == VK_COMPONENT_SWIZZLE_IDENTITY)
      swz = comp;

   switch (swz) {
   case VK_COMPONENT_SWIZZLE_ZERO:
      return PIPE_SWIZZLE_0;
   case VK_COMPONENT_SWIZZLE_ONE:
      return PIPE_SWIZZLE_1;
   case VK_COMPONENT_SWIZZLE_R:
      return PIPE_SWIZZLE_X;
   case VK_COMPONENT_SWIZZLE_G:
      return PIPE_SWIZZLE_Y;
   case VK_COMPONENT_SWIZZLE_B:
      return PIPE_SWIZZLE_Z;
   case VK_COMPONENT_SWIZZLE_A:
      return PIPE_SWIZZLE_W;
   default:
      unreachable("Unknown VkComponentSwizzle");
   }
}

VkResult
v3dv_CreateImageView(VkDevice _device,
                     const VkImageViewCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkImageView *pView)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image, image, pCreateInfo->image);
   struct v3dv_image_view *iview;

   iview = vk_zalloc2(&device->alloc, pAllocator, sizeof(*iview), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (iview == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;

   assert(range->layerCount > 0);
   assert(range->baseMipLevel < image->levels);

#ifdef DEBUG
   switch (image->type) {
   case VK_IMAGE_TYPE_1D:
   case VK_IMAGE_TYPE_2D:
      assert(range->baseArrayLayer + v3dv_layer_count(image, range) - 1 <=
             image->array_size);
      break;
   case VK_IMAGE_TYPE_3D:
      assert(range->baseArrayLayer + v3dv_layer_count(image, range) - 1
             <= u_minify(image->extent.depth, range->baseMipLevel));
      /* VK_KHR_maintenance1 */
      assert(pCreateInfo->viewType != VK_IMAGE_VIEW_TYPE_2D ||
             ((image->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT) &&
              range->levelCount == 1 && range->layerCount == 1));
      assert(pCreateInfo->viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY ||
             ((image->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT) &&
              range->levelCount == 1));
      break;
   default:
      unreachable("bad VkImageType");
   }
#endif

   iview->image = image;
   iview->aspects = range->aspectMask;
   iview->type = pCreateInfo->viewType;

   iview->base_level = range->baseMipLevel;
   iview->max_level = iview->base_level + v3dv_level_count(image, range) - 1;
   iview->extent = (VkExtent3D) {
      .width = u_minify(image->extent.width , iview->base_level),
      .height = u_minify(image->extent.height, iview->base_level),
      .depth = u_minify(image->extent.depth , iview->base_level),
   };

   iview->first_layer = range->baseArrayLayer;
   iview->last_layer = range->baseArrayLayer +
                       v3dv_layer_count(image, range) - 1;
   iview->offset =
      v3dv_layer_offset(image, iview->base_level, iview->first_layer);

   /* If we have a D24S8 format but the view only selects the stencil
    * aspect, we want to reinterpret the format as RGBA8_UINT, then map our
    * stencil data reads to the R component and ignore the GBA channels that
    * contain the depth aspect data.
    */
   VkFormat format;
   uint8_t image_view_swizzle[4];
   if (pCreateInfo->format == VK_FORMAT_D24_UNORM_S8_UINT &&
       range->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
      format = VK_FORMAT_R8G8B8A8_UINT;
      image_view_swizzle[0] = PIPE_SWIZZLE_X;
      image_view_swizzle[1] = PIPE_SWIZZLE_0;
      image_view_swizzle[2] = PIPE_SWIZZLE_0;
      image_view_swizzle[3] = PIPE_SWIZZLE_1;
   } else {
      format = pCreateInfo->format;

      /* FIXME: we are doing this VK-to-pipe swizzle mapping just to call
       * util_format_compose_swizzles. It would be worth checking whether
       * reimplementing the latter in terms of VK component swizzles would
       * be better.
       */
      image_view_swizzle[0] =
         vk_component_mapping_to_pipe_swizzle(VK_COMPONENT_SWIZZLE_R,
                                              pCreateInfo->components.r);
      image_view_swizzle[1] =
         vk_component_mapping_to_pipe_swizzle(VK_COMPONENT_SWIZZLE_G,
                                              pCreateInfo->components.g);
      image_view_swizzle[2] =
         vk_component_mapping_to_pipe_swizzle(VK_COMPONENT_SWIZZLE_B,
                                              pCreateInfo->components.b);
      image_view_swizzle[3] =
         vk_component_mapping_to_pipe_swizzle(VK_COMPONENT_SWIZZLE_A,
                                              pCreateInfo->components.a);
   }

   iview->vk_format = format;
   iview->format = v3dv_get_format(format);
   assert(iview->format && iview->format->supported);

   if (vk_format_is_depth_or_stencil(iview->vk_format)) {
      iview->internal_type = v3dv_get_internal_depth_type(iview->vk_format);
   } else {
      v3dv_get_internal_type_bpp_for_output_format(iview->format->rt_type,
                                                   &iview->internal_type,
                                                   &iview->internal_bpp);
   }

   const uint8_t *format_swizzle = v3dv_get_format_swizzle(format);
   util_format_compose_swizzles(format_swizzle, image_view_swizzle,
                                iview->swizzle);
   iview->swap_rb = iview->swizzle[0] == PIPE_SWIZZLE_Z;

   pack_texture_shader_state(device, iview);

   *pView = v3dv_image_view_to_handle(iview);

   return VK_SUCCESS;
}

void
v3dv_DestroyImageView(VkDevice _device,
                      VkImageView imageView,
                      const VkAllocationCallbacks* pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image_view, image_view, imageView);

   vk_free2(&device->alloc, pAllocator, image_view);
}

static void
pack_texture_shader_state_from_buffer_view(struct v3dv_device *device,
                                           struct v3dv_buffer_view *buffer_view)
{
   assert(buffer_view->buffer);
   const struct v3dv_buffer *buffer = buffer_view->buffer;

   v3dv_pack(buffer_view->texture_shader_state, TEXTURE_SHADER_STATE, tex) {
      tex.swizzle_r = translate_swizzle(PIPE_SWIZZLE_X);
      tex.swizzle_g = translate_swizzle(PIPE_SWIZZLE_Y);
      tex.swizzle_b = translate_swizzle(PIPE_SWIZZLE_Z);
      tex.swizzle_a = translate_swizzle(PIPE_SWIZZLE_W);

      tex.image_depth = 1;

      /* On 4.x, the height of a 1D texture is redefined to be the upper 14
       * bits of the width (which is only usable with txf). In other words,
       * we effectively have a single 28-bit size field, split across the
       * usual 14-bit width/height fields.
       */
      tex.image_width = buffer_view->size;
      tex.image_height = tex.image_width >> 14;
      tex.image_width &= (1 << 14) - 1;
      tex.image_height &= (1 << 14) - 1;

      tex.texture_type = buffer_view->format->tex_type;
      tex.srgb = vk_format_is_srgb(buffer_view->vk_format);

      /* At this point we don't have the job, which is why the first
       * parameter is NULL: it avoids a crash when cl_pack_emit_reloc tries
       * to add the BO to the job. It also means that we need to manually
       * add the buffer BO to any job that uses this texture.
       */
      const uint32_t base_offset =
         buffer->mem->bo->offset +
         buffer->mem_offset +
         buffer_view->offset;

      tex.texture_base_pointer = v3dv_cl_address(NULL, base_offset);
   }
}

VkResult
v3dv_CreateBufferView(VkDevice _device,
                      const VkBufferViewCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkBufferView *pView)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);

   const struct v3dv_buffer *buffer =
      v3dv_buffer_from_handle(pCreateInfo->buffer);

   struct v3dv_buffer_view *view =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!view)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   uint32_t range;
   if (pCreateInfo->range == VK_WHOLE_SIZE)
      range = buffer->size - pCreateInfo->offset;
   else
      range = pCreateInfo->range;

   enum pipe_format pipe_format = vk_format_to_pipe_format(pCreateInfo->format);
   uint32_t num_elements = range / util_format_get_blocksize(pipe_format);

   view->buffer = buffer;
   view->offset = pCreateInfo->offset;
   view->size = view->offset + range;
   view->num_elements = num_elements;
   view->vk_format = pCreateInfo->format;
   view->format = v3dv_get_format(view->vk_format);

   v3dv_get_internal_type_bpp_for_output_format(view->format->rt_type,
                                                &view->internal_type,
                                                &view->internal_bpp);

   if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT ||
       buffer->usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)
      pack_texture_shader_state_from_buffer_view(device, view);

   *pView = v3dv_buffer_view_to_handle(view);

   return VK_SUCCESS;
}

void
v3dv_DestroyBufferView(VkDevice _device,
                       VkBufferView bufferView,
                       const VkAllocationCallbacks *pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_buffer_view, buffer_view, bufferView);

   vk_free2(&device->alloc, pAllocator, buffer_view);
}