/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "pvr_csb.h"
#include "pvr_device_info.h"
#include "pvr_formats.h"
#include "pvr_private.h"
#include "pvr_tex_state.h"
#include "util/macros.h"
#include "util/u_math.h"
#include "vk_format.h"
#include "vk_image.h"
#include "vk_log.h"
#include "vk_object.h"
#include "vk_util.h"
#include "wsi_common.h"

static void pvr_image_init_memlayout(struct pvr_image *image)
{
   switch (image->vk.tiling) {
   default:
      unreachable("bad VkImageTiling");
   case VK_IMAGE_TILING_OPTIMAL:
      if (image->vk.wsi_legacy_scanout)
         image->memlayout = PVR_MEMLAYOUT_LINEAR;
      else if (image->vk.image_type == VK_IMAGE_TYPE_3D)
         image->memlayout = PVR_MEMLAYOUT_3DTWIDDLED;
      else
         image->memlayout = PVR_MEMLAYOUT_TWIDDLED;
      break;
   case VK_IMAGE_TILING_LINEAR:
      image->memlayout = PVR_MEMLAYOUT_LINEAR;
      break;
   }
}

static void pvr_image_init_physical_extent(struct pvr_image *image)
{
   assert(image->memlayout != PVR_MEMLAYOUT_UNDEFINED);

   /* clang-format off */
   if (image->vk.mip_levels > 1 ||
       image->memlayout == PVR_MEMLAYOUT_TWIDDLED ||
       image->memlayout == PVR_MEMLAYOUT_3DTWIDDLED) {
      /* clang-format on */
      image->physical_extent.width =
         util_next_power_of_two(image->vk.extent.width);
      image->physical_extent.height =
         util_next_power_of_two(image->vk.extent.height);
      image->physical_extent.depth =
         util_next_power_of_two(image->vk.extent.depth);
   } else {
      assert(image->memlayout == PVR_MEMLAYOUT_LINEAR);
      image->physical_extent = image->vk.extent;
   }
}

static void pvr_image_setup_mip_levels(struct pvr_image *image)
{
   const uint32_t extent_alignment =
      image->vk.image_type == VK_IMAGE_TYPE_3D ? 4 : 1;
   const unsigned int cpp = vk_format_get_blocksize(image->vk.format);

   /* Mip-mapped textures that are non-dword aligned need dword-aligned levels
    * so they can be TQd from.
    */
   const uint32_t level_alignment = image->vk.mip_levels > 1 ? 4 : 1;

   assert(image->vk.mip_levels <= ARRAY_SIZE(image->mip_levels));

   image->layer_size = 0;

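   /* For example (purely illustrative numbers): a single-sampled 8x8 2D
    * VK_FORMAT_R8_UNORM image with 4 mip levels has cpp = 1,
    * extent_alignment = 1 and level_alignment = 4, so the loop below gives
    * level sizes of 64, 16, 4 and 4 (the 1-byte 1x1 level padded to a dword)
    * at offsets 0, 64, 80 and 84, for a layer_size of 88 bytes.
    */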
   for (uint32_t i = 0; i < image->vk.mip_levels; i++) {
      const uint32_t height = u_minify(image->physical_extent.height, i);
      const uint32_t width = u_minify(image->physical_extent.width, i);
      const uint32_t depth = u_minify(image->physical_extent.depth, i);
      struct pvr_mip_level *mip_level = &image->mip_levels[i];

      mip_level->pitch = cpp * ALIGN(width, extent_alignment);
      mip_level->height_pitch = ALIGN(height, extent_alignment);
      mip_level->size = image->vk.samples * mip_level->pitch *
                        mip_level->height_pitch *
                        ALIGN(depth, extent_alignment);
      mip_level->size = ALIGN(mip_level->size, level_alignment);
      mip_level->offset = image->layer_size;

      image->layer_size += mip_level->size;
   }

   /* TODO: It might be useful to store the alignment in the image so it can be
    * checked (via an assert?) when setting
    * RGX_CR_TPU_TAG_CEM_4K_FACE_PACKING_EN, assuming this is where the
    * requirement comes from.
    */
   if (image->vk.array_layers > 1)
      image->layer_size = ALIGN(image->layer_size, image->alignment);

   image->size = image->layer_size * image->vk.array_layers;
}

VkResult pvr_CreateImage(VkDevice _device,
                         const VkImageCreateInfo *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkImage *pImage)
{
   PVR_FROM_HANDLE(pvr_device, device, _device);
   struct pvr_image *image;

   pvr_finishme("Review whether all inputs are handled\n");

   image =
      vk_image_create(&device->vk, pCreateInfo, pAllocator, sizeof(*image));
   if (!image)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* All images are aligned to 4k, in case of arrays/CEM.
    * Refer: pvr_GetImageMemoryRequirements for further details.
    */
   image->alignment = 4096U;

   /* Initialize the image using the saved information from pCreateInfo */
   pvr_image_init_memlayout(image);
   pvr_image_init_physical_extent(image);
   pvr_image_setup_mip_levels(image);

   *pImage = pvr_image_to_handle(image);

   return VK_SUCCESS;
}

void pvr_DestroyImage(VkDevice _device,
                      VkImage _image,
                      const VkAllocationCallbacks *pAllocator)
{
   PVR_FROM_HANDLE(pvr_device, device, _device);
   PVR_FROM_HANDLE(pvr_image, image, _image);

   if (!image)
      return;

   if (image->vma)
      pvr_unbind_memory(device, image->vma);

   vk_image_destroy(&device->vk, pAllocator, &image->vk);
}

/* clang-format off */
/* Consider a 4 page buffer object.
 *    ________________________________________
 *   |          |          |          |        |
 *   |__________|__________|__________|________|
 *         |
 *         \__ offset (0.5 page size)
 *
 *         |________size (2 pages)________|
 *
 *   |______VMA size required (3 pages)______|
 *
 *         |
 *         \__ returned dev_addr = vma + offset % page_size
 *
 * VMA size = align(size + offset % page_size, page_size);
 *
 * Note: the handling above is currently split between the generic driver
 * code and the winsys layer, as follows:
 * * As the winsys VMA allocation interface has no offset information, it
 *   cannot calculate the extra size needed to adjust for the unaligned
 *   offset. So the generic code is responsible for allocating a VMA with
 *   extra space to deal with the above scenario.
 * * The remaining work of mapping the VMA to the BO is done by the vma_map
 *   interface; as it has the offset information, no adjustments are needed
 *   in the generic code for this part.
 *
 * TODO: Look into merging heap_alloc and vma_map into a single interface.
 */
/* clang-format on */
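/* Worked example of the diagram above (illustrative numbers only, assuming a
 * 4 KiB page size): for offset = 2048 (half a page) and size = 8192 (2 pages)
 *
 *    VMA size = align(8192 + 2048 % 4096, 4096) = 12288   (3 pages)
 *    dev_addr = vma->dev_addr + 2048 % 4096     = vma + 2048
 *
 * i.e. the generic code over-allocates by up to a page so that the unaligned
 * start of the bind range still lies inside the VMA mapped by the winsys.
 */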
203
VkResult pvr_BindImageMemory2(VkDevice _device,
                              uint32_t bindInfoCount,
                              const VkBindImageMemoryInfo *pBindInfos)
{
   PVR_FROM_HANDLE(pvr_device, device, _device);
   uint32_t i;

   for (i = 0; i < bindInfoCount; i++) {
      PVR_FROM_HANDLE(pvr_device_memory, mem, pBindInfos[i].memory);
      PVR_FROM_HANDLE(pvr_image, image, pBindInfos[i].image);

      VkResult result = pvr_bind_memory(device,
                                        mem,
                                        pBindInfos[i].memoryOffset,
                                        image->size,
                                        image->alignment,
                                        &image->vma,
                                        &image->dev_addr);
      if (result != VK_SUCCESS) {
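         /* Unwind: unbind the memory of every image bound so far before
          * returning the error.
          */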
         while (i--) {
            PVR_FROM_HANDLE(pvr_image, image, pBindInfos[i].image);

            pvr_unbind_memory(device, image->vma);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}

void pvr_GetImageSubresourceLayout(VkDevice device,
                                   VkImage _image,
                                   const VkImageSubresource *subresource,
                                   VkSubresourceLayout *layout)
{
   PVR_FROM_HANDLE(pvr_image, image, _image);
   const struct pvr_mip_level *mip_level =
      &image->mip_levels[subresource->mipLevel];

   pvr_assert(subresource->mipLevel < image->vk.mip_levels);
   pvr_assert(subresource->arrayLayer < image->vk.array_layers);

   layout->offset =
      subresource->arrayLayer * image->layer_size + mip_level->offset;
   layout->rowPitch = mip_level->pitch;
   layout->depthPitch = mip_level->pitch * mip_level->height_pitch;
   layout->arrayPitch = image->layer_size;
   layout->size = mip_level->size;
}

VkResult pvr_CreateImageView(VkDevice _device,
                             const VkImageViewCreateInfo *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator,
                             VkImageView *pView)
{
   PVR_FROM_HANDLE(pvr_image, image, pCreateInfo->image);
   PVR_FROM_HANDLE(pvr_device, device, _device);
   struct pvr_texture_state_info info;
   unsigned char input_swizzle[4];
   const uint8_t *format_swizzle;
   struct pvr_image_view *iview;
   VkResult result;

   iview = vk_image_view_create(&device->vk,
                                false /* driver_internal */,
                                pCreateInfo,
                                pAllocator,
                                sizeof(*iview));
   if (!iview)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   iview->image = image;

   info.type = iview->vk.view_type;
   info.base_level = iview->vk.base_mip_level;
   info.mip_levels = iview->vk.level_count;
   info.extent = image->vk.extent;
   info.is_cube = (info.type == VK_IMAGE_VIEW_TYPE_CUBE ||
                   info.type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
   info.array_size = iview->vk.layer_count;
   info.offset = iview->vk.base_array_layer * image->layer_size +
                 image->mip_levels[info.base_level].offset;
   info.mipmaps_present = (image->vk.mip_levels > 1) ? true : false;
   info.stride = image->physical_extent.width;
   info.tex_state_type = PVR_TEXTURE_STATE_SAMPLE;
   info.mem_layout = image->memlayout;
   info.flags = 0;
   info.sample_count = image->vk.samples;
   info.addr = image->dev_addr;

   /* TODO: If ERN_46863 is supported, depth and stencil are sampled separately
    * from images with combined depth+stencil. Add logic here to handle it.
    */
   info.format = iview->vk.format;

   vk_component_mapping_to_pipe_swizzle(iview->vk.swizzle, input_swizzle);
   format_swizzle = pvr_get_format_swizzle(info.format);
   util_format_compose_swizzles(format_swizzle, input_swizzle, info.swizzle);

   result = pvr_pack_tex_state(device,
                               &info,
                               iview->texture_state[info.tex_state_type]);
   if (result != VK_SUCCESS)
      goto err_vk_image_view_destroy;

   /* Create an additional texture state for the cube type if the storage
    * usage flag is set.
    */
   if (info.is_cube && image->vk.usage & VK_IMAGE_USAGE_STORAGE_BIT) {
      info.tex_state_type = PVR_TEXTURE_STATE_STORAGE;
      result = pvr_pack_tex_state(device,
                                  &info,
                                  iview->texture_state[info.tex_state_type]);
      if (result != VK_SUCCESS)
         goto err_vk_image_view_destroy;
   }

   /* Attachment state is created as if mipmaps are not supported, so the
    * base level is set to zero and num_mip_levels is set to 1, which gives
    * the impression that this is the only level in the image. This also
    * requires that width, height and depth be adjusted accordingly. Since
    * iview->vk.extent is already adjusted for the base mip level, we use it
    * here.
    */
   /* TODO: Investigate and document the reason for the above approach. */
   info.extent = iview->vk.extent;

   info.mip_levels = 1;
   info.mipmaps_present = false;
   info.stride = u_minify(image->physical_extent.width, info.base_level);
   info.base_level = 0;
   info.tex_state_type = PVR_TEXTURE_STATE_ATTACHMENT;

   result = pvr_pack_tex_state(device,
                               &info,
                               iview->texture_state[info.tex_state_type]);
   if (result != VK_SUCCESS)
      goto err_vk_image_view_destroy;

   *pView = pvr_image_view_to_handle(iview);

   return VK_SUCCESS;

err_vk_image_view_destroy:
   vk_image_view_destroy(&device->vk, pAllocator, &iview->vk);

   return result;
}

void pvr_DestroyImageView(VkDevice _device,
                          VkImageView _iview,
                          const VkAllocationCallbacks *pAllocator)
{
   PVR_FROM_HANDLE(pvr_device, device, _device);
   PVR_FROM_HANDLE(pvr_image_view, iview, _iview);

   if (!iview)
      return;

   vk_image_view_destroy(&device->vk, pAllocator, &iview->vk);
}

VkResult pvr_CreateBufferView(VkDevice _device,
                              const VkBufferViewCreateInfo *pCreateInfo,
                              const VkAllocationCallbacks *pAllocator,
                              VkBufferView *pView)
{
   PVR_FROM_HANDLE(pvr_buffer, buffer, pCreateInfo->buffer);
   PVR_FROM_HANDLE(pvr_device, device, _device);
   struct pvr_texture_state_info info;
   const uint8_t *format_swizzle;
   struct pvr_buffer_view *bview;
   VkResult result;

   bview = vk_object_alloc(&device->vk,
                           pAllocator,
                           sizeof(*bview),
                           VK_OBJECT_TYPE_BUFFER_VIEW);
   if (!bview)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   bview->format = pCreateInfo->format;
   bview->range =
      vk_buffer_range(&buffer->vk, pCreateInfo->offset, pCreateInfo->range);

   /* If the remaining size of the buffer is not a multiple of the element
    * size of the format, the nearest smaller multiple is used.
    */
   bview->range -= bview->range % vk_format_get_blocksize(bview->format);
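   /* For illustration only: a 100-byte remaining range viewed with a
    * 16-byte-per-texel format such as VK_FORMAT_R32G32B32A32_SFLOAT is
    * rounded down to 96 bytes here.
    */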

   /* The range of the buffer view shouldn't be smaller than one texel. */
   assert(bview->range >= vk_format_get_blocksize(bview->format));

   info.base_level = 0U;
   info.mip_levels = 1U;
   info.mipmaps_present = false;
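   /* Expose the buffer contents as a linear 2D texture, 8192 texels wide,
    * with enough rows to cover the whole range.
    */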
   info.extent.width = 8192U;
   info.extent.height = bview->range / vk_format_get_blocksize(bview->format);
   info.extent.height = DIV_ROUND_UP(info.extent.height, info.extent.width);
   info.extent.depth = 0U;
   info.sample_count = 1U;
   info.stride = info.extent.width;
   info.offset = 0U;
   info.addr = PVR_DEV_ADDR_OFFSET(buffer->dev_addr, pCreateInfo->offset);
   info.mem_layout = PVR_MEMLAYOUT_LINEAR;
   info.is_cube = false;
   info.tex_state_type = PVR_TEXTURE_STATE_SAMPLE;
   info.format = bview->format;
   info.flags = PVR_TEXFLAGS_INDEX_LOOKUP;

   if (PVR_HAS_FEATURE(&device->pdevice->dev_info, tpu_array_textures))
      info.array_size = 1U;

   format_swizzle = pvr_get_format_swizzle(info.format);
   memcpy(info.swizzle, format_swizzle, sizeof(info.swizzle));

   result = pvr_pack_tex_state(device, &info, bview->texture_state);
   if (result != VK_SUCCESS)
      goto err_vk_buffer_view_destroy;

   *pView = pvr_buffer_view_to_handle(bview);

   return VK_SUCCESS;

err_vk_buffer_view_destroy:
   vk_object_free(&device->vk, pAllocator, bview);

   return result;
}

void pvr_DestroyBufferView(VkDevice _device,
                           VkBufferView bufferView,
                           const VkAllocationCallbacks *pAllocator)
{
   PVR_FROM_HANDLE(pvr_buffer_view, bview, bufferView);
   PVR_FROM_HANDLE(pvr_device, device, _device);

   if (!bview)
      return;

   vk_object_free(&device->vk, pAllocator, bview);
}