/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_descriptor_set.h"

#include "nvk_buffer.h"
#include "nvk_buffer_view.h"
#include "nvk_descriptor_set_layout.h"
#include "nvk_device.h"
#include "nvk_entrypoints.h"
#include "nvk_format.h"
#include "nvk_image_view.h"
#include "nvk_physical_device.h"
#include "nvk_sampler.h"
#include "nvkmd/nvkmd.h"

#include "util/format/u_format.h"

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

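/* Returns a host pointer to the descriptor at (binding, elem) in the set's
 * descriptor buffer.  If size_out is non-NULL, it is set to the number of
 * bytes between that descriptor and the end of the set.
 */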
static inline void *
desc_ubo_data(struct nvk_descriptor_set *set, uint32_t binding,
              uint32_t elem, uint32_t *size_out)
{
   const struct nvk_descriptor_set_binding_layout *binding_layout =
      &set->layout->binding[binding];

   uint32_t offset = binding_layout->offset + elem * binding_layout->stride;
   assert(offset < set->size);

   if (size_out != NULL)
      *size_out = set->size - offset;

   return (char *)set->map + offset;
}

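/* Copies desc_size bytes of descriptor data into the set at (binding, elem),
 * asserting that it fits in the space remaining in the set.
 */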
static void
write_desc(struct nvk_descriptor_set *set, uint32_t binding, uint32_t elem,
           const void *desc_data, size_t desc_size)
{
   ASSERTED uint32_t dst_size;
   void *dst = desc_ubo_data(set, binding, elem, &dst_size);
   assert(desc_size <= dst_size);
   memcpy(dst, desc_data, desc_size);
}

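/* Builds a sampled image and/or sampler descriptor in dst.  Multi-plane
 * views (e.g. YCbCr) get one nvk_sampled_image_descriptor per plane, so dst
 * must have room for up to three descriptors.
 */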
static void
get_sampled_image_view_desc(VkDescriptorType descriptor_type,
                            const VkDescriptorImageInfo *const info,
                            void *dst, size_t dst_size)
{
   struct nvk_sampled_image_descriptor desc[3] = { };
   uint8_t plane_count = 1;

   if (descriptor_type != VK_DESCRIPTOR_TYPE_SAMPLER &&
       info && info->imageView != VK_NULL_HANDLE) {
      VK_FROM_HANDLE(nvk_image_view, view, info->imageView);

      plane_count = view->plane_count;
      for (uint8_t plane = 0; plane < plane_count; plane++) {
         assert(view->planes[plane].sampled_desc_index > 0);
         assert(view->planes[plane].sampled_desc_index < (1 << 20));
         desc[plane].image_index = view->planes[plane].sampled_desc_index;
      }
   }

   if (descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
       descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
      VK_FROM_HANDLE(nvk_sampler, sampler, info->sampler);

      plane_count = MAX2(plane_count, sampler->plane_count);

      for (uint8_t plane = 0; plane < plane_count; plane++) {
         /* We need to replicate the last sampler plane out to all image
          * planes due to sampler table entry limitations. See
          * nvk_CreateSampler in nvk_sampler.c for more details.
          */
         uint8_t sampler_plane = MIN2(plane, sampler->plane_count - 1);
         assert(sampler->planes[sampler_plane].desc_index < (1 << 12));
         desc[plane].sampler_index = sampler->planes[sampler_plane].desc_index;
      }
   }

   assert(sizeof(desc[0]) * plane_count <= dst_size);
   memcpy(dst, desc, sizeof(desc[0]) * plane_count);
}

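/* Writes a sampled image or sampler descriptor into the set, substituting
 * any immutable sampler from the set layout for the one in the image info.
 */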
static void
write_sampled_image_view_desc(struct nvk_descriptor_set *set,
                              const VkDescriptorImageInfo *const _info,
                              uint32_t binding, uint32_t elem,
                              VkDescriptorType descriptor_type)
{
   VkDescriptorImageInfo info = *_info;

   if (descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
       descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
      const struct nvk_descriptor_set_binding_layout *binding_layout =
         &set->layout->binding[binding];
      if (binding_layout->immutable_samplers != NULL) {
         info.sampler = nvk_sampler_to_handle(
            binding_layout->immutable_samplers[elem]);
      }
   }

   uint32_t dst_size;
   void *dst = desc_ubo_data(set, binding, elem, &dst_size);
   get_sampled_image_view_desc(descriptor_type, &info, dst, dst_size);
}

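/* Builds a storage image descriptor in dst.  For multisampled images, the
 * descriptor also encodes the sample layout: log2 of the per-pixel sample
 * extent plus a packed map of 4-bit x/y offsets, one nibble per sample.
 */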
static void
get_storage_image_view_desc(const VkDescriptorImageInfo *const info,
                            void *dst, size_t dst_size)
{
   struct nvk_storage_image_descriptor desc = { };

   if (info && info->imageView != VK_NULL_HANDLE) {
      VK_FROM_HANDLE(nvk_image_view, view, info->imageView);

      /* Storage images are always single plane */
      assert(view->plane_count == 1);
      uint8_t plane = 0;

      assert(view->planes[plane].storage_desc_index > 0);
      assert(view->planes[plane].storage_desc_index < (1 << 20));

      desc.image_index = view->planes[plane].storage_desc_index;

      const struct nil_Extent4D_Samples px_extent_sa =
         nil_px_extent_sa(view->planes[plane].sample_layout);
      desc.sw_log2 = util_logbase2(px_extent_sa.width);
      desc.sh_log2 = util_logbase2(px_extent_sa.height);

      const enum nil_sample_layout slayout = view->planes[plane].sample_layout;
      if (slayout != NIL_SAMPLE_LAYOUT_1X1) {
         uint32_t samples = nil_sample_layout_samples(slayout);
         assert(samples <= 16);
         for (uint32_t s = 0; s < samples; s++) {
            const struct nil_sample_offset off = nil_sample_offset(slayout, s);
            assert(off.x < 4 && off.y < 4);
            uint32_t s_xy = off.y << 2 | off.x;
            desc.sample_map |= s_xy << (s * 4);
         }
      }
   }

   assert(sizeof(desc) <= dst_size);
   memcpy(dst, &desc, sizeof(desc));
}

static void
write_storage_image_view_desc(struct nvk_descriptor_set *set,
                              const VkDescriptorImageInfo *const info,
                              uint32_t binding, uint32_t elem)
{
   uint32_t dst_size;
   void *dst = desc_ubo_data(set, binding, elem, &dst_size);
   get_storage_image_view_desc(info, dst, dst_size);
}

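/* Builds a UBO descriptor for the given address range.  The address is
 * rounded down and the range rounded up to the minimum cbuf alignment; on
 * hardware with bindless cbufs, both are stored shifted right by 4 bits.
 */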
static union nvk_buffer_descriptor
ubo_desc(struct nvk_physical_device *pdev,
         struct nvk_addr_range addr_range)
{
   const uint32_t min_cbuf_alignment = nvk_min_cbuf_alignment(&pdev->info);

   assert(addr_range.addr % min_cbuf_alignment == 0);
   assert(addr_range.range <= NVK_MAX_CBUF_SIZE);

   addr_range.addr = ROUND_DOWN_TO(addr_range.addr, min_cbuf_alignment);
   addr_range.range = align(addr_range.range, min_cbuf_alignment);

   if (nvk_use_bindless_cbuf(&pdev->info)) {
      return (union nvk_buffer_descriptor) { .cbuf = {
         .base_addr_shift_4 = addr_range.addr >> 4,
         .size_shift_4 = addr_range.range >> 4,
      }};
   } else {
      return (union nvk_buffer_descriptor) { .addr = {
         .base_addr = addr_range.addr,
         .size = addr_range.range,
      }};
   }
}

static void
write_ubo_desc(struct nvk_physical_device *pdev,
               struct nvk_descriptor_set *set,
               const VkDescriptorBufferInfo *const info,
               uint32_t binding, uint32_t elem)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
   struct nvk_addr_range addr_range =
      nvk_buffer_addr_range(buffer, info->offset, info->range);

   const union nvk_buffer_descriptor desc = ubo_desc(pdev, addr_range);
   write_desc(set, binding, elem, &desc, sizeof(desc));
}

static void
write_dynamic_ubo_desc(struct nvk_physical_device *pdev,
                       struct nvk_descriptor_set *set,
                       const VkDescriptorBufferInfo *const info,
                       uint32_t binding, uint32_t elem)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
   struct nvk_addr_range addr_range =
      nvk_buffer_addr_range(buffer, info->offset, info->range);

   const struct nvk_descriptor_set_binding_layout *binding_layout =
      &set->layout->binding[binding];
   set->dynamic_buffers[binding_layout->dynamic_buffer_index + elem] =
      ubo_desc(pdev, addr_range);
}

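/* Builds an SSBO descriptor.  Unlike UBOs, SSBOs always use the raw
 * address/size form; the range is only aligned up for bounds checking.
 */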
static union nvk_buffer_descriptor
ssbo_desc(struct nvk_addr_range addr_range)
{
   assert(addr_range.addr % NVK_MIN_SSBO_ALIGNMENT == 0);
   assert(addr_range.range <= UINT32_MAX);

   addr_range.addr = ROUND_DOWN_TO(addr_range.addr, NVK_MIN_SSBO_ALIGNMENT);
   addr_range.range = align(addr_range.range, NVK_SSBO_BOUNDS_CHECK_ALIGNMENT);

   return (union nvk_buffer_descriptor) { .addr = {
      .base_addr = addr_range.addr,
      .size = addr_range.range,
   }};
}

static void
write_ssbo_desc(struct nvk_descriptor_set *set,
                const VkDescriptorBufferInfo *const info,
                uint32_t binding, uint32_t elem)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
   struct nvk_addr_range addr_range =
      nvk_buffer_addr_range(buffer, info->offset, info->range);

   const union nvk_buffer_descriptor desc = ssbo_desc(addr_range);
   write_desc(set, binding, elem, &desc, sizeof(desc));
}

static void
write_dynamic_ssbo_desc(struct nvk_descriptor_set *set,
                        const VkDescriptorBufferInfo *const info,
                        uint32_t binding, uint32_t elem)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
   struct nvk_addr_range addr_range =
      nvk_buffer_addr_range(buffer, info->offset, info->range);

   const struct nvk_descriptor_set_binding_layout *binding_layout =
      &set->layout->binding[binding];
   set->dynamic_buffers[binding_layout->dynamic_buffer_index + elem] =
      ssbo_desc(addr_range);
}

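/* Builds a texel buffer descriptor from a raw GPU address and format,
 * going through the device's EDB buffer view cache.  Used for
 * VK_EXT_descriptor_buffer, where no VkBufferView object exists.
 */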
static void
get_edb_buffer_view_desc(struct nvk_device *dev,
                         const VkDescriptorAddressInfoEXT *info,
                         void *dst, size_t dst_size)
{
   struct nvk_edb_buffer_view_descriptor desc = { };
   if (info != NULL && info->address != 0) {
      enum pipe_format format = nvk_format_to_pipe_format(info->format);
      desc = nvk_edb_bview_cache_get_descriptor(dev, &dev->edb_bview_cache,
                                                info->address, info->range,
                                                format);
   }
   assert(sizeof(desc) <= dst_size);
   memcpy(dst, &desc, sizeof(desc));
}

static void
write_buffer_view_desc(struct nvk_physical_device *pdev,
                       struct nvk_descriptor_set *set,
                       const VkBufferView bufferView,
                       uint32_t binding, uint32_t elem)
{
   VK_FROM_HANDLE(nvk_buffer_view, view, bufferView);

   if (nvk_use_edb_buffer_views(pdev)) {
      struct nvk_edb_buffer_view_descriptor desc = { };
      if (view != NULL)
         desc = view->edb_desc;
      write_desc(set, binding, elem, &desc, sizeof(desc));
   } else {
      struct nvk_buffer_view_descriptor desc = { };
      if (view != NULL)
         desc = view->desc;
      write_desc(set, binding, elem, &desc, sizeof(desc));
   }
}

static void
write_inline_uniform_data(struct nvk_descriptor_set *set,
                          const VkWriteDescriptorSetInlineUniformBlock *info,
                          uint32_t binding, uint32_t offset)
{
   assert(set->layout->binding[binding].stride == 1);
   write_desc(set, binding, offset, info->pData, info->dataSize);
}

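/* vkUpdateDescriptorSets: processes all writes, dispatching on descriptor
 * type, then all copies.  Copies are done by memcpy between the two sets'
 * descriptor buffers; dynamic buffer bindings are copied separately since
 * they live in the set object rather than the descriptor buffer.
 */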
VKAPI_ATTR void VKAPI_CALL
nvk_UpdateDescriptorSets(VkDevice device,
                         uint32_t descriptorWriteCount,
                         const VkWriteDescriptorSet *pDescriptorWrites,
                         uint32_t descriptorCopyCount,
                         const VkCopyDescriptorSet *pDescriptorCopies)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   for (uint32_t w = 0; w < descriptorWriteCount; w++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[w];
      VK_FROM_HANDLE(nvk_descriptor_set, set, write->dstSet);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_sampled_image_view_desc(set, write->pImageInfo + j,
                                          write->dstBinding,
                                          write->dstArrayElement + j,
                                          write->descriptorType);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_storage_image_view_desc(set, write->pImageInfo + j,
                                          write->dstBinding,
                                          write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_buffer_view_desc(pdev, set, write->pTexelBufferView[j],
                                   write->dstBinding, write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_ubo_desc(pdev, set, write->pBufferInfo + j,
                           write->dstBinding,
                           write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_ssbo_desc(set, write->pBufferInfo + j,
                            write->dstBinding,
                            write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_dynamic_ubo_desc(pdev, set, write->pBufferInfo + j,
                                   write->dstBinding,
                                   write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_dynamic_ssbo_desc(set, write->pBufferInfo + j,
                                    write->dstBinding,
                                    write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
         const VkWriteDescriptorSetInlineUniformBlock *write_inline =
            vk_find_struct_const(write->pNext,
                                 WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK);
         assert(write_inline->dataSize == write->descriptorCount);
         write_inline_uniform_data(set, write_inline, write->dstBinding,
                                   write->dstArrayElement);
         break;
      }

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      VK_FROM_HANDLE(nvk_descriptor_set, src, copy->srcSet);
      VK_FROM_HANDLE(nvk_descriptor_set, dst, copy->dstSet);

      const struct nvk_descriptor_set_binding_layout *src_binding_layout =
         &src->layout->binding[copy->srcBinding];
      const struct nvk_descriptor_set_binding_layout *dst_binding_layout =
         &dst->layout->binding[copy->dstBinding];

      if (dst_binding_layout->stride > 0 && src_binding_layout->stride > 0) {
         for (uint32_t j = 0; j < copy->descriptorCount; j++) {
            ASSERTED uint32_t dst_max_size, src_max_size;
            void *dst_map = desc_ubo_data(dst, copy->dstBinding,
                                          copy->dstArrayElement + j,
                                          &dst_max_size);
            const void *src_map = desc_ubo_data(src, copy->srcBinding,
                                                copy->srcArrayElement + j,
                                                &src_max_size);
            const uint32_t copy_size = MIN2(dst_binding_layout->stride,
                                            src_binding_layout->stride);
            assert(copy_size <= dst_max_size && copy_size <= src_max_size);
            memcpy(dst_map, src_map, copy_size);
         }
      }

      if (vk_descriptor_type_is_dynamic(src_binding_layout->type)) {
         const uint32_t dst_dyn_start =
            dst_binding_layout->dynamic_buffer_index + copy->dstArrayElement;
         const uint32_t src_dyn_start =
            src_binding_layout->dynamic_buffer_index + copy->srcArrayElement;
         typed_memcpy(&dst->dynamic_buffers[dst_dyn_start],
                      &src->dynamic_buffers[src_dyn_start],
                      copy->descriptorCount);
      }
   }
}

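/* Applies descriptor writes to a push descriptor set.  A temporary
 * nvk_descriptor_set pointing at the push set's storage is set up on the
 * stack so the same write_*_desc helpers can be reused.  Only descriptor
 * types that are legal in push descriptors are handled here.
 */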
void
nvk_push_descriptor_set_update(struct nvk_device *dev,
                               struct nvk_push_descriptor_set *push_set,
                               struct nvk_descriptor_set_layout *layout,
                               uint32_t write_count,
                               const VkWriteDescriptorSet *writes)
{
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   assert(layout->non_variable_descriptor_buffer_size < sizeof(push_set->data));
   struct nvk_descriptor_set set = {
      .layout = layout,
      .size = sizeof(push_set->data),
      .map = push_set->data,
   };

   for (uint32_t w = 0; w < write_count; w++) {
      const VkWriteDescriptorSet *write = &writes[w];
      assert(write->dstSet == VK_NULL_HANDLE);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_sampled_image_view_desc(&set, write->pImageInfo + j,
                                          write->dstBinding,
                                          write->dstArrayElement + j,
                                          write->descriptorType);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_storage_image_view_desc(&set, write->pImageInfo + j,
                                          write->dstBinding,
                                          write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_buffer_view_desc(pdev, &set, write->pTexelBufferView[j],
                                   write->dstBinding, write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_ubo_desc(pdev, &set, write->pBufferInfo + j,
                           write->dstBinding,
                           write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            write_ssbo_desc(&set, write->pBufferInfo + j,
                            write->dstBinding,
                            write->dstArrayElement + j);
         }
         break;

      default:
         break;
      }
   }
}

static void
nvk_descriptor_pool_free(struct nvk_descriptor_pool *pool,
                         uint64_t addr, uint64_t size);

static void
nvk_descriptor_set_destroy(struct nvk_device *dev,
                           struct nvk_descriptor_pool *pool,
                           struct nvk_descriptor_set *set)
{
   list_del(&set->link);
   if (set->size > 0)
      nvk_descriptor_pool_free(pool, set->addr, set->size);
   vk_descriptor_set_layout_unref(&dev->vk, &set->layout->vk);

   vk_object_free(&dev->vk, NULL, set);
}

static void
nvk_destroy_descriptor_pool(struct nvk_device *dev,
                            const VkAllocationCallbacks *pAllocator,
                            struct nvk_descriptor_pool *pool)
{
   list_for_each_entry_safe(struct nvk_descriptor_set, set, &pool->sets, link)
      nvk_descriptor_set_destroy(dev, pool, set);

   util_vma_heap_finish(&pool->heap);

   if (pool->mem != NULL)
      nvkmd_mem_unref(pool->mem);

   if (pool->host_mem != NULL)
      vk_free2(&dev->vk.alloc, pAllocator, pool->host_mem);

   vk_object_free(&dev->vk, pAllocator, pool);
}

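/* Fake base address for the VMA heap of host-only pools.  Host-only pools
 * have no GPU memory, so heap addresses are simply offsets from this
 * marker into the pool's host allocation.
 */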
#define HOST_ONLY_ADDR 0xc0ffee0000000000ull

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateDescriptorPool(VkDevice _device,
                         const VkDescriptorPoolCreateInfo *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkDescriptorPool *pDescriptorPool)
{
   VK_FROM_HANDLE(nvk_device, dev, _device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   struct nvk_descriptor_pool *pool;
   VkResult result;

   pool = vk_object_zalloc(&dev->vk, pAllocator, sizeof(*pool),
                           VK_OBJECT_TYPE_DESCRIPTOR_POOL);
   if (!pool)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   list_inithead(&pool->sets);

   const VkMutableDescriptorTypeCreateInfoEXT *mutable_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);

   uint32_t max_align = 0;
   for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
      const VkMutableDescriptorTypeListEXT *type_list = NULL;
      if (pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT &&
          mutable_info && i < mutable_info->mutableDescriptorTypeListCount)
         type_list = &mutable_info->pMutableDescriptorTypeLists[i];

      uint32_t stride, alignment;
      nvk_descriptor_stride_align_for_type(pdev, 0 /* not DESCRIPTOR_BUFFER */,
                                           pCreateInfo->pPoolSizes[i].type,
                                           type_list, &stride, &alignment);
      max_align = MAX2(max_align, alignment);
   }

   uint64_t mem_size = 0;
   for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
      const VkMutableDescriptorTypeListEXT *type_list = NULL;
      if (pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT &&
          mutable_info && i < mutable_info->mutableDescriptorTypeListCount)
         type_list = &mutable_info->pMutableDescriptorTypeLists[i];

      uint32_t stride, alignment;
      nvk_descriptor_stride_align_for_type(pdev, 0 /* not DESCRIPTOR_BUFFER */,
                                           pCreateInfo->pPoolSizes[i].type,
                                           type_list, &stride, &alignment);
      mem_size += MAX2(stride, max_align) *
                  pCreateInfo->pPoolSizes[i].descriptorCount;
   }

   /* Individual descriptor sets are aligned to the min UBO alignment to
    * ensure that we don't end up with unaligned data access in any shaders.
    * This means that each descriptor buffer allocated may burn up to 16B of
    * extra space to get the right alignment.  (Technically, it's at most 28B
    * because we're always going to start at least 4B aligned but we're being
    * conservative here.)  Allocate enough extra space that we can chop it
    * into maxSets pieces and align each one of them to 32B.
    */
   mem_size += nvk_min_cbuf_alignment(&pdev->info) * pCreateInfo->maxSets;

   if (mem_size > 0) {
      if (pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT) {
         pool->host_mem = vk_zalloc2(&dev->vk.alloc, pAllocator, mem_size,
                                     16, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (pool->host_mem == NULL) {
            nvk_destroy_descriptor_pool(dev, pAllocator, pool);
            return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         util_vma_heap_init(&pool->heap, HOST_ONLY_ADDR, mem_size);
      } else {
         result = nvkmd_dev_alloc_mapped_mem(dev->nvkmd, &dev->vk.base,
                                             mem_size, 0, NVKMD_MEM_LOCAL,
                                             NVKMD_MEM_MAP_WR, &pool->mem);
         if (result != VK_SUCCESS) {
            nvk_destroy_descriptor_pool(dev, pAllocator, pool);
            return result;
         }

         /* The BO may be larger thanks to GPU page alignment.  We may as well
          * make that extra space available to the client.
          */
         assert(pool->mem->size_B >= mem_size);
         mem_size = pool->mem->size_B;

         util_vma_heap_init(&pool->heap, pool->mem->va->addr, mem_size);
      }
   } else {
      util_vma_heap_init(&pool->heap, 0, 0);
   }

   pool->mem_size_B = mem_size;

   *pDescriptorPool = nvk_descriptor_pool_to_handle(pool);
   return VK_SUCCESS;
}

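/* Sub-allocates size bytes from the pool's VMA heap, returning both the
 * heap address (a GPU VA, or HOST_ONLY_ADDR-based for host-only pools)
 * and a host pointer for writing descriptors.
 */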
static VkResult
nvk_descriptor_pool_alloc(struct nvk_descriptor_pool *pool,
                          uint64_t size, uint64_t alignment,
                          uint64_t *addr_out, void **map_out)
{
   assert(size > 0);
   assert(size % alignment == 0);

   if (size > pool->heap.free_size)
      return VK_ERROR_OUT_OF_POOL_MEMORY;

   uint64_t addr = util_vma_heap_alloc(&pool->heap, size, alignment);
   if (addr == 0)
      return VK_ERROR_FRAGMENTED_POOL;

   if (pool->host_mem != NULL) {
      /* In this case, the address is a host address */
      assert(addr >= HOST_ONLY_ADDR);
      assert(addr + size <= HOST_ONLY_ADDR + pool->mem_size_B);
      uint64_t offset = addr - HOST_ONLY_ADDR;

      *addr_out = addr;
      *map_out = pool->host_mem + offset;
   } else {
      assert(addr >= pool->mem->va->addr);
      assert(addr + size <= pool->mem->va->addr + pool->mem_size_B);
      uint64_t offset = addr - pool->mem->va->addr;

      *addr_out = addr;
      *map_out = pool->mem->map + offset;
   }

   return VK_SUCCESS;
}

static void
nvk_descriptor_pool_free(struct nvk_descriptor_pool *pool,
                         uint64_t addr, uint64_t size)
{
   assert(size > 0);
   if (pool->host_mem != NULL) {
      assert(addr >= HOST_ONLY_ADDR);
      assert(addr + size <= HOST_ONLY_ADDR + pool->mem_size_B);
   } else {
      assert(addr >= pool->mem->va->addr);
      assert(addr + size <= pool->mem->va->addr + pool->mem_size_B);
   }
   util_vma_heap_free(&pool->heap, addr, size);
}

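/* Allocates a single descriptor set from the pool: computes the set's
 * buffer size (including any variable-count tail binding), sub-allocates
 * from the pool heap, and pre-writes immutable sampler descriptors.
 */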
static VkResult
nvk_descriptor_set_create(struct nvk_device *dev,
                          struct nvk_descriptor_pool *pool,
                          struct nvk_descriptor_set_layout *layout,
                          uint32_t variable_count,
                          struct nvk_descriptor_set **out_set)
{
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   struct nvk_descriptor_set *set;
   VkResult result;

   uint32_t mem_size = sizeof(struct nvk_descriptor_set) +
      layout->dynamic_buffer_count * sizeof(struct nvk_buffer_address);

   set = vk_object_zalloc(&dev->vk, NULL, mem_size,
                          VK_OBJECT_TYPE_DESCRIPTOR_SET);
   if (!set)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   set->size = layout->non_variable_descriptor_buffer_size;

   if (layout->binding_count > 0 &&
       (layout->binding[layout->binding_count - 1].flags &
        VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)) {
      uint32_t stride = layout->binding[layout->binding_count - 1].stride;
      set->size += stride * variable_count;
   }

   uint32_t alignment = nvk_min_cbuf_alignment(&pdev->info);
   set->size = align64(set->size, alignment);

   if (set->size > 0) {
      result = nvk_descriptor_pool_alloc(pool, set->size, alignment,
                                         &set->addr, &set->map);
      if (result != VK_SUCCESS) {
         vk_object_free(&dev->vk, NULL, set);
         return result;
      }
   }

   vk_descriptor_set_layout_ref(&layout->vk);
   set->layout = layout;

   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          layout->binding[b].type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      if (layout->binding[b].immutable_samplers == NULL)
         continue;

      uint32_t array_size = layout->binding[b].array_size;
      if (layout->binding[b].flags &
          VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)
         array_size = variable_count;

      const VkDescriptorImageInfo empty = {};
      for (uint32_t j = 0; j < array_size; j++) {
         write_sampled_image_view_desc(set, &empty, b, j,
                                       layout->binding[b].type);
      }
   }

   list_addtail(&set->link, &pool->sets);
   *out_set = set;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_AllocateDescriptorSets(VkDevice device,
                           const VkDescriptorSetAllocateInfo *pAllocateInfo,
                           VkDescriptorSet *pDescriptorSets)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   struct nvk_descriptor_set *set = NULL;

   const VkDescriptorSetVariableDescriptorCountAllocateInfo *var_desc_count =
      vk_find_struct_const(pAllocateInfo->pNext,
                           DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);

   /* allocate a set of buffers for each shader to contain descriptors */
   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      VK_FROM_HANDLE(nvk_descriptor_set_layout, layout,
                     pAllocateInfo->pSetLayouts[i]);
      /* If descriptorSetCount is zero or this structure is not included in
       * the pNext chain, then the variable lengths are considered to be zero.
       */
      const uint32_t variable_count =
         var_desc_count && var_desc_count->descriptorSetCount > 0 ?
         var_desc_count->pDescriptorCounts[i] : 0;

      result = nvk_descriptor_set_create(dev, pool, layout,
                                         variable_count, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = nvk_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS) {
      nvk_FreeDescriptorSets(device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
      for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
         pDescriptorSets[i] = VK_NULL_HANDLE;
      }
   }
   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_FreeDescriptorSets(VkDevice device,
                       VkDescriptorPool descriptorPool,
                       uint32_t descriptorSetCount,
                       const VkDescriptorSet *pDescriptorSets)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      VK_FROM_HANDLE(nvk_descriptor_set, set, pDescriptorSets[i]);

      if (set)
         nvk_descriptor_set_destroy(dev, pool, set);
   }
   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
nvk_DestroyDescriptorPool(VkDevice device,
                          VkDescriptorPool _pool,
                          const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_descriptor_pool, pool, _pool);

   if (!_pool)
      return;

   nvk_destroy_descriptor_pool(dev, pAllocator, pool);
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_ResetDescriptorPool(VkDevice device,
                        VkDescriptorPool descriptorPool,
                        VkDescriptorPoolResetFlags flags)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);

   list_for_each_entry_safe(struct nvk_descriptor_set, set, &pool->sets, link)
      nvk_descriptor_set_destroy(dev, pool, set);

   return VK_SUCCESS;
}

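/* Shared implementation for vkUpdateDescriptorSetWithTemplate and push
 * descriptor template updates: walks the template entries and dispatches
 * to the same write_*_desc helpers used by nvk_UpdateDescriptorSets.
 */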
static void
nvk_descriptor_set_write_template(struct nvk_device *dev,
                                  struct nvk_descriptor_set *set,
                                  const struct vk_descriptor_update_template *template,
                                  const void *data)
{
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   for (uint32_t i = 0; i < template->entry_count; i++) {
      const struct vk_descriptor_template_entry *entry =
         &template->entries[i];

      switch (entry->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorImageInfo *info =
               data + entry->offset + j * entry->stride;

            write_sampled_image_view_desc(set, info,
                                          entry->binding,
                                          entry->array_element + j,
                                          entry->type);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorImageInfo *info =
               data + entry->offset + j * entry->stride;

            write_storage_image_view_desc(set, info,
                                          entry->binding,
                                          entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkBufferView *bview =
               data + entry->offset + j * entry->stride;

            write_buffer_view_desc(pdev, set, *bview,
                                   entry->binding,
                                   entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorBufferInfo *info =
               data + entry->offset + j * entry->stride;

            write_ubo_desc(pdev, set, info,
                           entry->binding,
                           entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorBufferInfo *info =
               data + entry->offset + j * entry->stride;

            write_ssbo_desc(set, info,
                            entry->binding,
                            entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorBufferInfo *info =
               data + entry->offset + j * entry->stride;

            write_dynamic_ubo_desc(pdev, set, info,
                                   entry->binding,
                                   entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorBufferInfo *info =
               data + entry->offset + j * entry->stride;

            write_dynamic_ssbo_desc(set, info,
                                    entry->binding,
                                    entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
         write_desc(set,
                    entry->binding,
                    entry->array_element,
                    data + entry->offset,
                    entry->array_count);
         break;

      default:
         break;
      }
   }
}

VKAPI_ATTR void VKAPI_CALL
nvk_UpdateDescriptorSetWithTemplate(VkDevice device,
                                    VkDescriptorSet descriptorSet,
                                    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                    const void *pData)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_descriptor_set, set, descriptorSet);
   VK_FROM_HANDLE(vk_descriptor_update_template, template,
                  descriptorUpdateTemplate);

   nvk_descriptor_set_write_template(dev, set, template, pData);
}

void
nvk_push_descriptor_set_update_template(
   struct nvk_device *dev,
   struct nvk_push_descriptor_set *push_set,
   struct nvk_descriptor_set_layout *layout,
   const struct vk_descriptor_update_template *template,
   const void *data)
{
   struct nvk_descriptor_set tmp_set = {
      .layout = layout,
      .size = sizeof(push_set->data),
      .map = push_set->data,
   };
   nvk_descriptor_set_write_template(dev, &tmp_set, template, data);
}

VKAPI_ATTR void VKAPI_CALL
nvk_GetDescriptorEXT(VkDevice _device,
                     const VkDescriptorGetInfoEXT *pDescriptorInfo,
                     size_t dataSize, void *pDescriptor)
{
   VK_FROM_HANDLE(nvk_device, dev, _device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   switch (pDescriptorInfo->type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER: {
      const VkDescriptorImageInfo info = {
         .sampler = *pDescriptorInfo->data.pSampler,
      };
      get_sampled_image_view_desc(VK_DESCRIPTOR_TYPE_SAMPLER,
                                  &info, pDescriptor, dataSize);
      break;
   }

   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      get_sampled_image_view_desc(pDescriptorInfo->type,
                                  pDescriptorInfo->data.pCombinedImageSampler,
                                  pDescriptor, dataSize);
      break;

   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      get_sampled_image_view_desc(pDescriptorInfo->type,
                                  pDescriptorInfo->data.pSampledImage,
                                  pDescriptor, dataSize);
      break;

   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      get_storage_image_view_desc(pDescriptorInfo->data.pStorageImage,
                                  pDescriptor, dataSize);
      break;

   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      get_edb_buffer_view_desc(dev, pDescriptorInfo->data.pUniformTexelBuffer,
                               pDescriptor, dataSize);
      break;

   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      get_edb_buffer_view_desc(dev, pDescriptorInfo->data.pStorageTexelBuffer,
                               pDescriptor, dataSize);
      break;

   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
      struct nvk_addr_range addr_range = { };
      if (pDescriptorInfo->data.pUniformBuffer != NULL &&
          pDescriptorInfo->data.pUniformBuffer->address != 0) {
         addr_range = (const struct nvk_addr_range) {
            .addr = pDescriptorInfo->data.pUniformBuffer->address,
            .range = pDescriptorInfo->data.pUniformBuffer->range,
         };
      }
      union nvk_buffer_descriptor desc = ubo_desc(pdev, addr_range);
      assert(sizeof(desc) <= dataSize);
      memcpy(pDescriptor, &desc, sizeof(desc));
      break;
   }

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
      struct nvk_addr_range addr_range = { };
      if (pDescriptorInfo->data.pStorageBuffer != NULL &&
          pDescriptorInfo->data.pStorageBuffer->address != 0) {
         addr_range = (const struct nvk_addr_range) {
            .addr = pDescriptorInfo->data.pStorageBuffer->address,
            .range = pDescriptorInfo->data.pStorageBuffer->range,
         };
      }
      union nvk_buffer_descriptor desc = ssbo_desc(addr_range);
      assert(sizeof(desc) <= dataSize);
      memcpy(pDescriptor, &desc, sizeof(desc));
      break;
   }

   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      get_sampled_image_view_desc(pDescriptorInfo->type,
                                  pDescriptorInfo->data.pInputAttachmentImage,
                                  pDescriptor, dataSize);
      break;

   default:
      unreachable("Unknown descriptor type");
   }
}