/*
 * Copyright 2024 Valve Corporation
 * Copyright 2024 Alyssa Rosenzweig
 * Copyright 2022-2023 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "hk_buffer_view.h"
#include "asahi/layout/layout.h"
#include "asahi/lib/agx_nir_lower_vbo.h"
#include "util/bitscan.h"
#include "util/format/u_format.h"
#include "util/format/u_formats.h"

#include "agx_helpers.h"
#include "agx_nir_texture.h"
#include "agx_pack.h"
#include "hk_buffer.h"
#include "hk_device.h"
#include "hk_entrypoints.h"
#include "hk_image.h"
#include "hk_physical_device.h"

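/* Report the buffer-related format features supported for a Vulkan format:
 * vertex buffer, uniform/storage texel buffer, and texel buffer atomics.
 */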
VkFormatFeatureFlags2
hk_get_buffer_format_features(struct hk_physical_device *pdev,
                              VkFormat vk_format)
{
   VkFormatFeatureFlags2 features = 0;
   enum pipe_format p_format = hk_format_to_pipe_format(vk_format);

   if (p_format == PIPE_FORMAT_NONE)
      return 0;

   if (agx_vbo_supports_format(p_format))
      features |= VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT;

   if (ail_pixel_format[p_format].texturable &&
       !util_format_is_depth_or_stencil(p_format)) {

      features |= VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT;

      /* RGB32 is specially supported for uniform texel buffers only, so
       * storage support requires a power-of-two texel size.
       */
      if (util_is_power_of_two_nonzero(util_format_get_blocksize(p_format))) {
         features |= VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT |
                     VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT;
      }

      if (p_format == PIPE_FORMAT_R32_UINT || p_format == PIPE_FORMAT_R32_SINT)
         features |= VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
   }

   return features;
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_CreateBufferView(VkDevice _device, const VkBufferViewCreateInfo *pCreateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkBufferView *pBufferView)
{
   VK_FROM_HANDLE(hk_device, device, _device);
   VK_FROM_HANDLE(hk_buffer, buffer, pCreateInfo->buffer);
   struct hk_buffer_view *view;
   VkResult result;

   view = vk_buffer_view_create(&device->vk, pCreateInfo, pAllocator,
                                sizeof(*view));
   if (!view)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   enum pipe_format format = hk_format_to_pipe_format(view->vk.format);
   const struct util_format_description *desc = util_format_description(format);

   uint8_t format_swizzle[4] = {
      desc->swizzle[0],
      desc->swizzle[1],
      desc->swizzle[2],
      desc->swizzle[3],
   };

   if (util_format_is_depth_or_stencil(format)) {
      assert(!util_format_is_depth_and_stencil(format) &&
             "separate stencil always used");

      /* Broadcast depth and stencil */
      format_swizzle[0] = 0;
      format_swizzle[1] = 0;
      format_swizzle[2] = 0;
      format_swizzle[3] = 0;
   }

   /* Decompose the offset into a multiple of 16 bytes (which we can fold into
    * the address) and an extra texel-aligned tail offset of up to 15 bytes.
    *
    * This lets us apply the remaining offset in the shader instead, getting
    * around alignment restrictions on the base address pointer.
    */
   uint64_t base = hk_buffer_address(buffer, 0) + (view->vk.offset & ~0xf);
   uint32_t tail_offset_B = view->vk.offset & 0xf;
   uint32_t tail_offset_el = tail_offset_B / util_format_get_blocksize(format);
   assert(tail_offset_el * util_format_get_blocksize(format) == tail_offset_B &&
          "must be texel aligned");

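   /* Texel buffers are described as fixed-width 2D linear textures. The
    * element count and tail offset are stashed in software-defined descriptor
    * fields for the texture lowering in the shader to consume.
    */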
   struct agx_texture_packed tex;
   agx_pack(&tex, TEXTURE, cfg) {
      cfg.dimension = AGX_TEXTURE_DIMENSION_2D;
      cfg.layout = AGX_LAYOUT_LINEAR;
      cfg.channels = ail_pixel_format[format].channels;
      cfg.type = ail_pixel_format[format].type;
      cfg.swizzle_r = agx_channel_from_pipe(format_swizzle[0]);
      cfg.swizzle_g = agx_channel_from_pipe(format_swizzle[1]);
      cfg.swizzle_b = agx_channel_from_pipe(format_swizzle[2]);
      cfg.swizzle_a = agx_channel_from_pipe(format_swizzle[3]);

      cfg.width = AGX_TEXTURE_BUFFER_WIDTH;
      cfg.height = DIV_ROUND_UP(view->vk.elements, cfg.width);
      cfg.first_level = cfg.last_level = 0;

      cfg.address = base;
      cfg.buffer_size_sw = view->vk.elements;
      cfg.buffer_offset_sw = tail_offset_el;

      cfg.srgb = (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);
      cfg.srgb_2_channel = cfg.srgb && util_format_colormask(desc) == 0x3;

      cfg.depth = 1;
      cfg.stride = (cfg.width * util_format_get_blocksize(format)) - 16;
   }

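   /* Pack a matching PBE descriptor for the same memory, used for storage
    * texel buffer writes.
    */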
   struct agx_pbe_packed pbe;
   agx_pack(&pbe, PBE, cfg) {
      cfg.dimension = AGX_TEXTURE_DIMENSION_2D;
      cfg.layout = AGX_LAYOUT_LINEAR;
      cfg.channels = ail_pixel_format[format].channels;
      cfg.type = ail_pixel_format[format].type;
      cfg.srgb = util_format_is_srgb(format);

      assert(desc->nr_channels >= 1 && desc->nr_channels <= 4);

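      /* The PBE takes the inverse swizzle: for each channel, record which
       * format component holds it.
       */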
      for (unsigned i = 0; i < desc->nr_channels; ++i) {
         if (desc->swizzle[i] == 0)
            cfg.swizzle_r = i;
         else if (desc->swizzle[i] == 1)
            cfg.swizzle_g = i;
         else if (desc->swizzle[i] == 2)
            cfg.swizzle_b = i;
         else if (desc->swizzle[i] == 3)
            cfg.swizzle_a = i;
      }

      cfg.buffer = base;
      cfg.buffer_offset_sw = tail_offset_el;

      cfg.width = AGX_TEXTURE_BUFFER_WIDTH;
      cfg.height = DIV_ROUND_UP(view->vk.elements, cfg.width);
      cfg.level = 0;
      cfg.stride = (cfg.width * util_format_get_blocksize(format)) - 4;
      cfg.layers = 1;
      cfg.levels = 1;
   }

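   /* Add both descriptors to the device-wide image descriptor table,
    * recording their indices so the view can be bound and later freed.
    */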
   result = hk_descriptor_table_add(device, &device->images, &tex, sizeof(tex),
                                    &view->tex_desc_index);
   if (result != VK_SUCCESS) {
      vk_buffer_view_destroy(&device->vk, pAllocator, &view->vk);
      return result;
   }

   result = hk_descriptor_table_add(device, &device->images, &pbe, sizeof(pbe),
                                    &view->pbe_desc_index);
   if (result != VK_SUCCESS) {
      hk_descriptor_table_remove(device, &device->images, view->tex_desc_index);
      vk_buffer_view_destroy(&device->vk, pAllocator, &view->vk);
      return result;
   }

   *pBufferView = hk_buffer_view_to_handle(view);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
hk_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
                     const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(hk_device, device, _device);
   VK_FROM_HANDLE(hk_buffer_view, view, bufferView);

   if (!view)
      return;

   hk_descriptor_table_remove(device, &device->images, view->tex_desc_index);
   hk_descriptor_table_remove(device, &device->images, view->pbe_desc_index);

   vk_buffer_view_destroy(&device->vk, pAllocator, &view->vk);
}