/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
 */
#include "tgsi/tgsi_from_mesa.h"

#include "zink_context.h"
#include "zink_compiler.h"
#include "zink_descriptors.h"
#include "zink_program.h"
#include "zink_resource.h"
#include "zink_screen.h"

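/* each lazy pool hands out at most this many sets: a tenth of the cached
 * manager's ZINK_DEFAULT_MAX_DESCS (presumably to keep per-batch pools small,
 * since full pools are parked on an overflow list until batch reset below)
 */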
#define MAX_LAZY_DESCRIPTORS (ZINK_DEFAULT_MAX_DESCS / 10)

struct zink_descriptor_data_lazy {
   struct zink_descriptor_data base;
   VkDescriptorUpdateTemplateEntry push_entries[PIPE_SHADER_TYPES]; //gfx+fbfetch
   VkDescriptorUpdateTemplateEntry compute_push_entry;
   bool push_state_changed[2]; //gfx, compute
   uint8_t state_changed[2]; //gfx, compute
};

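/* a simple bucket of pre-allocated descriptor sets: sets_alloc counts how many
 * sets have been allocated from 'pool' so far and set_idx is the next unused one
 */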
struct zink_descriptor_pool {
   VkDescriptorPool pool;
   VkDescriptorSet sets[MAX_LAZY_DESCRIPTORS];
   unsigned set_idx;
   unsigned sets_alloc;
};

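/* per-batch descriptor state: one pool hash table per descriptor type (keyed
 * by the program's pool key), with push pools and the currently-bound sets
 * tracked separately for gfx [0] and compute [1]
 */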
struct zink_batch_descriptor_data_lazy {
   struct zink_batch_descriptor_data base;
   struct util_dynarray overflowed_pools;
   struct hash_table pools[ZINK_DESCRIPTOR_TYPES];
   struct zink_descriptor_pool *push_pool[2];
   struct zink_program *pg[2]; //gfx, compute
   uint32_t compat_id[2];
   VkDescriptorSetLayout dsl[2][ZINK_DESCRIPTOR_TYPES];
   VkDescriptorSet sets[2][ZINK_DESCRIPTOR_TYPES + 1];
   unsigned push_usage[2];
   bool has_fbfetch;
};

ALWAYS_INLINE static struct zink_descriptor_data_lazy *
dd_lazy(struct zink_context *ctx)
{
   return (struct zink_descriptor_data_lazy*)ctx->dd;
}

ALWAYS_INLINE static struct zink_batch_descriptor_data_lazy *
bdd_lazy(struct zink_batch_state *bs)
{
   return (struct zink_batch_descriptor_data_lazy*)bs->dd;
}

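/* fill in one update-template entry so that vkUpdateDescriptorSetWithTemplate
 * can read descriptor data straight out of the zink_context's 'di' arrays;
 * when flatten_dynamic is set (lazy mode), dynamic UBOs are rewritten as plain
 * UBOs since the dynamic ones are handled by the push set instead
 */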
static void
init_template_entry(struct zink_shader *shader, enum zink_descriptor_type type,
                    unsigned idx, VkDescriptorUpdateTemplateEntry *entry, unsigned *entry_idx, bool flatten_dynamic)
{
   int index = shader->bindings[type][idx].index;
   enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
   entry->dstArrayElement = 0;
   entry->dstBinding = shader->bindings[type][idx].binding;
   entry->descriptorCount = shader->bindings[type][idx].size;
   if (shader->bindings[type][idx].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC && flatten_dynamic)
      /* filter out DYNAMIC type here */
      entry->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   else
      entry->descriptorType = shader->bindings[type][idx].type;
   switch (shader->bindings[type][idx].type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      entry->offset = offsetof(struct zink_context, di.ubos[stage][index]);
      entry->stride = sizeof(VkDescriptorBufferInfo);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      entry->offset = offsetof(struct zink_context, di.textures[stage][index]);
      entry->stride = sizeof(VkDescriptorImageInfo);
      break;
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      entry->offset = offsetof(struct zink_context, di.tbos[stage][index]);
      entry->stride = sizeof(VkBufferView);
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      entry->offset = offsetof(struct zink_context, di.ssbos[stage][index]);
      entry->stride = sizeof(VkDescriptorBufferInfo);
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      entry->offset = offsetof(struct zink_context, di.images[stage][index]);
      entry->stride = sizeof(VkDescriptorImageInfo);
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      entry->offset = offsetof(struct zink_context, di.texel_images[stage][index]);
      entry->stride = sizeof(VkBufferView);
      break;
   default:
      unreachable("unknown type");
   }
   (*entry_idx)++;
}

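/* count how many distinct VkDescriptorPoolSize slots a descriptor set uses
 * (each nonzero size contributes one); the _compact variant covers the merged
 * sets used when screen->compact_descriptors is enabled, where SSBO sizes fold
 * into the UBO set and image sizes fold into the sampler-view set
 */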
static uint16_t
descriptor_program_num_sizes(VkDescriptorPoolSize *sizes, enum zink_descriptor_type type)
{
   switch (type) {
   case ZINK_DESCRIPTOR_TYPE_UBO:
      return !!sizes[ZDS_INDEX_UBO].descriptorCount;
   case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
      return !!sizes[ZDS_INDEX_COMBINED_SAMPLER].descriptorCount +
             !!sizes[ZDS_INDEX_UNIFORM_TEXELS].descriptorCount;
   case ZINK_DESCRIPTOR_TYPE_SSBO:
      return !!sizes[ZDS_INDEX_STORAGE_BUFFER].descriptorCount;
   case ZINK_DESCRIPTOR_TYPE_IMAGE:
      return !!sizes[ZDS_INDEX_STORAGE_IMAGE].descriptorCount +
             !!sizes[ZDS_INDEX_STORAGE_TEXELS].descriptorCount;
   default: break;
   }
   unreachable("unknown type");
}

static uint16_t
descriptor_program_num_sizes_compact(VkDescriptorPoolSize *sizes, unsigned desc_set)
{
   switch (desc_set) {
   case ZINK_DESCRIPTOR_TYPE_UBO:
      return !!sizes[ZDS_INDEX_COMP_UBO].descriptorCount + !!sizes[ZDS_INDEX_COMP_STORAGE_BUFFER].descriptorCount;
   case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
      return !!sizes[ZDS_INDEX_COMP_COMBINED_SAMPLER].descriptorCount +
             !!sizes[ZDS_INDEX_COMP_UNIFORM_TEXELS].descriptorCount +
             !!sizes[ZDS_INDEX_COMP_STORAGE_IMAGE].descriptorCount +
             !!sizes[ZDS_INDEX_COMP_STORAGE_TEXELS].descriptorCount;
   case ZINK_DESCRIPTOR_TYPE_SSBO:
   case ZINK_DESCRIPTOR_TYPE_IMAGE:
   default: break;
   }
   unreachable("unknown type");
}

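/* build all the descriptor machinery a program needs up front: set layouts,
 * pool keys/sizes for later set allocation, and (when supported) one
 * VkDescriptorUpdateTemplate per set, with set 0 reserved for push-style
 * uniform data
 */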
bool
zink_descriptor_program_init_lazy(struct zink_context *ctx, struct zink_program *pg)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkDescriptorSetLayoutBinding bindings[ZINK_DESCRIPTOR_TYPES][PIPE_SHADER_TYPES * 64];
   VkDescriptorUpdateTemplateEntry entries[ZINK_DESCRIPTOR_TYPES][PIPE_SHADER_TYPES * 64];
   unsigned num_bindings[ZINK_DESCRIPTOR_TYPES] = {0};
   uint8_t has_bindings = 0;
   unsigned push_count = 0;
   uint16_t num_type_sizes[ZINK_DESCRIPTOR_TYPES];
   VkDescriptorPoolSize sizes[6] = {0}; //zink_descriptor_size_index

   struct zink_shader **stages;
   if (pg->is_compute)
      stages = &((struct zink_compute_program*)pg)->shader;
   else
      stages = ((struct zink_gfx_program*)pg)->shaders;

   if (!pg->dd)
      pg->dd = (void*)rzalloc(pg, struct zink_program_descriptor_data);
   if (!pg->dd)
      return false;

   if (!pg->is_compute && stages[PIPE_SHADER_FRAGMENT]->nir->info.fs.uses_fbfetch_output) {
      zink_descriptor_util_init_fbfetch(ctx);
      push_count = 1;
      pg->dd->fbfetch = true;
   }

   unsigned entry_idx[ZINK_DESCRIPTOR_TYPES] = {0};

   unsigned num_shaders = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
   bool have_push = screen->info.have_KHR_push_descriptor;
   for (int i = 0; i < num_shaders; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;

      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
      VkShaderStageFlagBits stage_flags = zink_shader_stage(stage);
      for (int j = 0; j < ZINK_DESCRIPTOR_TYPES; j++) {
         unsigned desc_set = screen->desc_set_id[j] - 1;
         for (int k = 0; k < shader->num_bindings[j]; k++) {
            /* dynamic ubos handled in push */
            if (shader->bindings[j][k].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
               pg->dd->push_usage |= BITFIELD64_BIT(stage);

               push_count++;
               continue;
            }

            assert(num_bindings[desc_set] < ARRAY_SIZE(bindings[desc_set]));
            VkDescriptorSetLayoutBinding *binding = &bindings[desc_set][num_bindings[desc_set]];
            binding->binding = shader->bindings[j][k].binding;
            binding->descriptorType = shader->bindings[j][k].type;
            binding->descriptorCount = shader->bindings[j][k].size;
            binding->stageFlags = stage_flags;
            binding->pImmutableSamplers = NULL;

            unsigned idx = screen->compact_descriptors ? zink_vktype_to_size_idx_comp(shader->bindings[j][k].type) :
                                                         zink_vktype_to_size_idx(shader->bindings[j][k].type);
            sizes[idx].descriptorCount += shader->bindings[j][k].size;
            sizes[idx].type = shader->bindings[j][k].type;
            init_template_entry(shader, j, k, &entries[desc_set][entry_idx[desc_set]], &entry_idx[desc_set], zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY);
            num_bindings[desc_set]++;
            has_bindings |= BITFIELD_BIT(desc_set);
            pg->dd->real_binding_usage |= BITFIELD_BIT(j);
         }
         num_type_sizes[desc_set] = screen->compact_descriptors ?
                                    descriptor_program_num_sizes_compact(sizes, desc_set) :
                                    descriptor_program_num_sizes(sizes, j);
      }
      pg->dd->bindless |= shader->bindless;
   }
   if (pg->dd->bindless)
      zink_descriptors_init_bindless(ctx);
   pg->dd->binding_usage = has_bindings;
   if (!has_bindings && !push_count && !pg->dd->bindless) {
      ralloc_free(pg->dd);
      pg->dd = NULL;

      pg->layout = zink_pipeline_layout_create(screen, pg, &pg->compat_id);
      return !!pg->layout;
   }

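   /* set 0 is always the push set; when a program has no push descriptors,
    * a dummy layout keeps the remaining set indices stable
    */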
   pg->dsl[pg->num_dsl++] = push_count ? ctx->dd->push_dsl[pg->is_compute]->layout : ctx->dd->dummy_dsl->layout;
   if (has_bindings) {
      for (unsigned i = 0; i < ARRAY_SIZE(sizes); i++)
         sizes[i].descriptorCount *= zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ? MAX_LAZY_DESCRIPTORS : ZINK_DEFAULT_MAX_DESCS;
      u_foreach_bit(desc_set, has_bindings) {
         for (unsigned i = 0; i < desc_set; i++) {
            /* push set is always 0 */
            if (!pg->dsl[i + 1]) {
               /* inject a null dsl */
               pg->dsl[pg->num_dsl++] = ctx->dd->dummy_dsl->layout;
               pg->dd->binding_usage |= BITFIELD_BIT(i);
            }
         }
         struct zink_descriptor_layout_key *key;
         pg->dd->layouts[pg->num_dsl] = zink_descriptor_util_layout_get(ctx, desc_set, bindings[desc_set], num_bindings[desc_set], &key);
         unsigned idx = screen->compact_descriptors ? zink_descriptor_type_to_size_idx_comp(desc_set) :
                                                      zink_descriptor_type_to_size_idx(desc_set);
         VkDescriptorPoolSize *sz = &sizes[idx];
         VkDescriptorPoolSize sz2[4];
         if (screen->compact_descriptors) {
            unsigned found = 0;
            while (found < num_type_sizes[desc_set]) {
               if (sz->descriptorCount) {
                  memcpy(&sz2[found], sz, sizeof(VkDescriptorPoolSize));
                  found++;
               }
               sz++;
            }
            sz = sz2;
         } else {
            if (!sz->descriptorCount)
               sz++;
         }
         pg->dd->pool_key[desc_set] = zink_descriptor_util_pool_key_get(ctx, desc_set, key, sz, num_type_sizes[desc_set]);
         pg->dd->pool_key[desc_set]->use_count++;
         pg->dsl[pg->num_dsl] = pg->dd->layouts[pg->num_dsl]->layout;
         pg->num_dsl++;
      }
   }
   /* TODO: make this dynamic? */
   if (pg->dd->bindless) {
      unsigned desc_set = screen->desc_set_id[ZINK_DESCRIPTOR_BINDLESS];
      pg->num_dsl = desc_set + 1;
      pg->dsl[desc_set] = ctx->dd->bindless_layout;
      for (unsigned i = 0; i < desc_set; i++) {
         if (!pg->dsl[i]) {
            /* inject a null dsl */
            pg->dsl[i] = ctx->dd->dummy_dsl->layout;
            if (i != screen->desc_set_id[ZINK_DESCRIPTOR_TYPES])
               pg->dd->binding_usage |= BITFIELD_BIT(i);
         }
      }
      pg->dd->binding_usage |= BITFIELD_MASK(ZINK_DESCRIPTOR_TYPES);
   }

   pg->layout = zink_pipeline_layout_create(screen, pg, &pg->compat_id);
   if (!pg->layout)
      return false;
   /* TODO: figure out uncached+notemplate and return on zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_NOTEMPLATES */
   if (!screen->info.have_KHR_descriptor_update_template)
      return true;

   VkDescriptorUpdateTemplateCreateInfo template[ZINK_DESCRIPTOR_TYPES + 1] = {0};
   /* type of template */
   VkDescriptorUpdateTemplateType types[ZINK_DESCRIPTOR_TYPES + 1] = {VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET};
   if (have_push && zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY)
      types[0] = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR;

   /* number of descriptors in template */
   unsigned wd_count[ZINK_DESCRIPTOR_TYPES + 1];
   if (push_count)
      wd_count[0] = pg->is_compute ? 1 : (ZINK_SHADER_COUNT + !!ctx->dd->has_fbfetch);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
      wd_count[i + 1] = pg->dd->pool_key[i] ? pg->dd->pool_key[i]->layout->num_bindings : 0;

   VkDescriptorUpdateTemplateEntry *push_entries[2] = {
      dd_lazy(ctx)->push_entries,
      &dd_lazy(ctx)->compute_push_entry,
   };
   for (unsigned i = 0; i < pg->num_dsl; i++) {
      bool is_push = i == 0;
      /* no need for empty templates */
      if (pg->dsl[i] == ctx->dd->dummy_dsl->layout ||
          pg->dsl[i] == ctx->dd->bindless_layout ||
          (!is_push && pg->dd->templates[i]))
         continue;
      template[i].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO;
      assert(wd_count[i]);
      template[i].descriptorUpdateEntryCount = wd_count[i];
      if (is_push)
         template[i].pDescriptorUpdateEntries = push_entries[pg->is_compute];
      else
         template[i].pDescriptorUpdateEntries = entries[i - 1];
      template[i].templateType = types[i];
      template[i].descriptorSetLayout = pg->dsl[i];
      template[i].pipelineBindPoint = pg->is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS;
      template[i].pipelineLayout = pg->layout;
      template[i].set = i;
      VkDescriptorUpdateTemplate t;
      if (VKSCR(CreateDescriptorUpdateTemplate)(screen->dev, &template[i], NULL, &t) != VK_SUCCESS)
         return false;
      pg->dd->templates[i] = t;
   }
   return true;
}

void
zink_descriptor_program_deinit_lazy(struct zink_context *ctx, struct zink_program *pg)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   if (!pg->dd)
      return;
   for (unsigned i = 0; pg->num_dsl && i < ZINK_DESCRIPTOR_TYPES; i++) {
      if (pg->dd->pool_key[i])
         pg->dd->pool_key[i]->use_count--;
      if (pg->dd->templates[i])
         VKSCR(DestroyDescriptorUpdateTemplate)(screen->dev, pg->dd->templates[i], NULL);
   }
   ralloc_free(pg->dd);
   pg->dd = NULL;
}

static VkDescriptorPool
create_pool(struct zink_screen *screen, unsigned num_type_sizes, const VkDescriptorPoolSize *sizes, unsigned flags)
{
   VkDescriptorPool pool;
   VkDescriptorPoolCreateInfo dpci = {0};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = num_type_sizes;
   dpci.flags = flags;
   dpci.maxSets = MAX_LAZY_DESCRIPTORS;
   VkResult result = VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &pool);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkCreateDescriptorPool failed (%s)", vk_Result_to_str(result));
      return VK_NULL_HANDLE;
   }
   return pool;
}

static struct zink_descriptor_pool *
get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute);

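/* make sure 'pool' still has an unused set to hand out: sets are allocated in
 * growing chunks (roughly 10x each time, capped at 100 per allocation and
 * MAX_LAZY_DESCRIPTORS total); a full pool may still be referenced by the
 * in-flight batch, so it's parked on the overflow list and replaced with a
 * fresh one instead of being destroyed immediately
 */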
static struct zink_descriptor_pool *
check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct hash_entry *he, struct zink_program *pg,
                 enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   /* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
   if (pool->set_idx == pool->sets_alloc) {
      unsigned sets_to_alloc = MIN2(MIN2(MAX2(pool->sets_alloc * 10, 10), MAX_LAZY_DESCRIPTORS) - pool->sets_alloc, 100);
      if (!sets_to_alloc) {
         /* overflowed pool: queue for deletion on next reset */
         util_dynarray_append(&bdd->overflowed_pools, struct zink_descriptor_pool*, pool);
         _mesa_hash_table_remove(&bdd->pools[type], he);
         return get_descriptor_pool_lazy(ctx, pg, type, bdd, is_compute);
      }
      if (!zink_descriptor_util_alloc_sets(screen, pg->dsl[type + 1],
                                           pool->pool, &pool->sets[pool->sets_alloc], sets_to_alloc))
         return NULL;
      pool->sets_alloc += sets_to_alloc;
   }
   return pool;
}

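/* push pools only ever hold UBO descriptors (one per shader stage), plus an
 * input attachment slot for fbfetch on gfx
 */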
static struct zink_descriptor_pool *
create_push_pool(struct zink_screen *screen, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute, bool has_fbfetch)
{
   struct zink_descriptor_pool *pool = rzalloc(bdd, struct zink_descriptor_pool);
   VkDescriptorPoolSize sizes[2];
   sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   if (is_compute)
      sizes[0].descriptorCount = MAX_LAZY_DESCRIPTORS;
   else {
      sizes[0].descriptorCount = ZINK_SHADER_COUNT * MAX_LAZY_DESCRIPTORS;
      sizes[1].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
      sizes[1].descriptorCount = MAX_LAZY_DESCRIPTORS;
   }
   pool->pool = create_pool(screen, !is_compute && has_fbfetch ? 2 : 1, sizes, 0);
   return pool;
}

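/* same growth scheme as check_pool_alloc, with one extra wrinkle: if fbfetch
 * was enabled after this pool was created, its layout no longer matches, so
 * the pool is retired to the overflow list and recreated with the
 * input-attachment slot included
 */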
static struct zink_descriptor_pool *
check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   /* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
   if (pool->set_idx == pool->sets_alloc || unlikely(ctx->dd->has_fbfetch != bdd->has_fbfetch)) {
      unsigned sets_to_alloc = MIN2(MIN2(MAX2(pool->sets_alloc * 10, 10), MAX_LAZY_DESCRIPTORS) - pool->sets_alloc, 100);
      if (!sets_to_alloc || unlikely(ctx->dd->has_fbfetch != bdd->has_fbfetch)) {
         /* overflowed pool: queue for deletion on next reset */
         util_dynarray_append(&bdd->overflowed_pools, struct zink_descriptor_pool*, pool);
         bdd->push_pool[is_compute] = create_push_pool(screen, bdd, is_compute, ctx->dd->has_fbfetch);
         bdd->has_fbfetch = ctx->dd->has_fbfetch;
         return check_push_pool_alloc(ctx, bdd->push_pool[is_compute], bdd, is_compute);
      }
      if (!zink_descriptor_util_alloc_sets(screen, ctx->dd->push_dsl[is_compute]->layout,
                                           pool->pool, &pool->sets[pool->sets_alloc], sets_to_alloc)) {
         mesa_loge("ZINK: failed to allocate push set!");
         return NULL;
      }
      pool->sets_alloc += sets_to_alloc;
   }
   return pool;
}

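/* fetch (or lazily create) the batch-local pool matching this program's pool
 * key for the given descriptor type
 */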
static struct zink_descriptor_pool *
get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct zink_descriptor_pool_key *pool_key = pg->dd->pool_key[type];
   struct hash_entry *he = _mesa_hash_table_search(&bdd->pools[type], pool_key);
   struct zink_descriptor_pool *pool;
   if (he) {
      pool = he->data;
      return check_pool_alloc(ctx, pool, he, pg, type, bdd, is_compute);
   }
   pool = rzalloc(bdd, struct zink_descriptor_pool);
   if (!pool)
      return NULL;
   const unsigned num_type_sizes = pool_key->sizes[1].descriptorCount ? 2 : 1;
   pool->pool = create_pool(screen, num_type_sizes, pool_key->sizes, 0);
   if (!pool->pool) {
      ralloc_free(pool);
      return NULL;
   }
   _mesa_hash_table_insert(&bdd->pools[type], pool_key, pool);
   return check_pool_alloc(ctx, pool, he, pg, type, bdd, is_compute);
}

ALWAYS_INLINE static VkDescriptorSet
get_descriptor_set_lazy(struct zink_descriptor_pool *pool)
{
   if (!pool)
      return VK_NULL_HANDLE;

   assert(pool->set_idx < pool->sets_alloc);
   return pool->sets[pool->set_idx++];
}

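/* grab a fresh set for every set type flagged in changed_sets; set types with
 * no pool key (nothing to write) get VK_NULL_HANDLE
 */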
static bool
populate_sets(struct zink_context *ctx, struct zink_batch_descriptor_data_lazy *bdd,
              struct zink_program *pg, uint8_t *changed_sets, VkDescriptorSet *sets)
{
   u_foreach_bit(type, *changed_sets) {
      if (pg->dd->pool_key[type]) {
         struct zink_descriptor_pool *pool = get_descriptor_pool_lazy(ctx, pg, type, bdd, pg->is_compute);
         sets[type] = get_descriptor_set_lazy(pool);
         if (!sets[type])
            return false;
      } else
         sets[type] = VK_NULL_HANDLE;
   }
   return true;
}

void
zink_descriptor_set_update_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, VkDescriptorSet set)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VKCTX(UpdateDescriptorSetWithTemplate)(screen->dev, set, pg->dd->templates[type + 1], ctx);
}

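/* allocate, template-update, and bind sets for every type in changed_sets;
 * types in bind_sets that didn't change are rebound from the batch's cached
 * sets (needed after a pipeline-layout compatibility break)
 */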
void
zink_descriptors_update_lazy_masked(struct zink_context *ctx, bool is_compute, uint8_t changed_sets, uint8_t bind_sets)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = ctx->batch.state;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
   VkDescriptorSet desc_sets[ZINK_DESCRIPTOR_TYPES];
   if (!pg->dd->binding_usage || (!changed_sets && !bind_sets))
      return;

   if (!populate_sets(ctx, bdd, pg, &changed_sets, desc_sets)) {
      debug_printf("ZINK: couldn't get descriptor sets!\n");
      return;
   }
   /* no flushing allowed */
   assert(ctx->batch.state == bs);

   u_foreach_bit(type, changed_sets) {
      assert(type + 1 < pg->num_dsl);
      if (pg->dd->pool_key[type]) {
         VKSCR(UpdateDescriptorSetWithTemplate)(screen->dev, desc_sets[type], pg->dd->templates[type + 1], ctx);
         VKSCR(CmdBindDescriptorSets)(bs->cmdbuf,
                                 is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                 /* set index incremented by 1 to account for push set */
                                 pg->layout, type + 1, 1, &desc_sets[type],
                                 0, NULL);
         bdd->sets[is_compute][type + 1] = desc_sets[type];
      }
   }
   u_foreach_bit(type, bind_sets & ~changed_sets) {
      if (!pg->dd->pool_key[type])
         continue;
      assert(bdd->sets[is_compute][type + 1]);
      VKSCR(CmdBindDescriptorSets)(bs->cmdbuf,
                              is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                              /* set index incremented by 1 to account for push set */
                              pg->layout, type + 1, 1, &bdd->sets[is_compute][type + 1],
                              0, NULL);
   }
}

/* only called by cached manager for fbfetch handling */
VkDescriptorSet
zink_descriptors_alloc_lazy_push(struct zink_context *ctx)
{
   struct zink_batch_state *bs = ctx->batch.state;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkDescriptorSet push_set = VK_NULL_HANDLE;
   if (!bdd->push_pool[0]) {
      bdd->push_pool[0] = create_push_pool(screen, bdd, false, true);
      bdd->has_fbfetch = true;
   }
   struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[0], bdd, false);
   push_set = get_descriptor_set_lazy(pool);
   if (!push_set)
      mesa_loge("ZINK: failed to get push descriptor set!");
   return push_set;
}

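/* the main per-draw/dispatch entry point: work out which sets are dirty for
 * the current program (new batch, program change, or explicit invalidation),
 * then update and bind only those; push (set 0) data goes through
 * vkCmdPushDescriptorSetWithTemplateKHR when available, otherwise through a
 * set allocated from the push pool
 */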
void
zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
{
   struct zink_batch_state *bs = ctx->batch.state;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   bool have_KHR_push_descriptor = screen->info.have_KHR_push_descriptor;

   bool batch_changed = !bdd->pg[is_compute];
   if (batch_changed) {
      /* update all sets and bind null sets */
      dd_lazy(ctx)->state_changed[is_compute] = pg->dd->binding_usage & BITFIELD_MASK(ZINK_DESCRIPTOR_TYPES);
      dd_lazy(ctx)->push_state_changed[is_compute] = !!pg->dd->push_usage;
   }

   if (pg != bdd->pg[is_compute]) {
      /* if we don't already know that we have to update all sets,
       * check to see if any dsls changed
       *
       * also always update the dsl pointers on program change
       */
      for (unsigned i = 0; i < ARRAY_SIZE(bdd->dsl[is_compute]); i++) {
         /* push set is already detected, start at 1 */
         if (bdd->dsl[is_compute][i] != pg->dsl[i + 1])
            dd_lazy(ctx)->state_changed[is_compute] |= BITFIELD_BIT(i);
         bdd->dsl[is_compute][i] = pg->dsl[i + 1];
      }
      dd_lazy(ctx)->push_state_changed[is_compute] |= bdd->push_usage[is_compute] != pg->dd->push_usage;
      bdd->push_usage[is_compute] = pg->dd->push_usage;
   }

   uint8_t changed_sets = pg->dd->binding_usage & dd_lazy(ctx)->state_changed[is_compute];
   bool need_push = pg->dd->push_usage &&
                    (dd_lazy(ctx)->push_state_changed[is_compute] || batch_changed);
   VkDescriptorSet push_set = VK_NULL_HANDLE;
   if (need_push && !have_KHR_push_descriptor) {
      struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[pg->is_compute], bdd, pg->is_compute);
      push_set = get_descriptor_set_lazy(pool);
      if (!push_set) {
         mesa_loge("ZINK: failed to get push descriptor set!");
         /* just jam something in to avoid a hang */
         push_set = ctx->dd->dummy_set;
      }
   }
   /*
    * when binding a pipeline, the pipeline can correctly access any previously bound
    * descriptor sets which were bound with compatible pipeline layouts
    * VK 14.2.2
    */
   uint8_t bind_sets = bdd->pg[is_compute] && bdd->compat_id[is_compute] == pg->compat_id ? 0 : pg->dd->binding_usage;
   if (pg->dd->push_usage && (dd_lazy(ctx)->push_state_changed[is_compute] || bind_sets)) {
      if (have_KHR_push_descriptor) {
         if (dd_lazy(ctx)->push_state_changed[is_compute])
            VKCTX(CmdPushDescriptorSetWithTemplateKHR)(bs->cmdbuf, pg->dd->templates[0],
                                                        pg->layout, 0, ctx);
      } else {
         if (dd_lazy(ctx)->push_state_changed[is_compute]) {
            VKCTX(UpdateDescriptorSetWithTemplate)(screen->dev, push_set, pg->dd->templates[0], ctx);
            bdd->sets[is_compute][0] = push_set;
         }
         assert(push_set || bdd->sets[is_compute][0]);
         VKCTX(CmdBindDescriptorSets)(bs->cmdbuf,
                                 is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                 pg->layout, 0, 1, push_set ? &push_set : &bdd->sets[is_compute][0],
                                 0, NULL);
      }
   }
   dd_lazy(ctx)->push_state_changed[is_compute] = false;
   zink_descriptors_update_lazy_masked(ctx, is_compute, changed_sets, bind_sets);
   if (pg->dd->bindless && unlikely(!ctx->dd->bindless_bound)) {
      VKCTX(CmdBindDescriptorSets)(ctx->batch.state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                   pg->layout, ZINK_DESCRIPTOR_BINDLESS, 1, &ctx->dd->bindless_set,
                                   0, NULL);
      ctx->dd->bindless_bound = true;
   }
   bdd->pg[is_compute] = pg;
   ctx->dd->pg[is_compute] = pg;
   bdd->compat_id[is_compute] = pg->compat_id;
   dd_lazy(ctx)->state_changed[is_compute] = 0;
}

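/* mark descriptor state dirty so the next update_lazy rewrites it; a UBO
 * invalidation covering slot 0 dirties the push set, everything else just
 * flags its set type (remapped in compact mode, where SSBO and IMAGE fold
 * into the first two sets)
 */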
void
zink_context_invalidate_descriptor_state_lazy(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned start, unsigned count)
{
   if (type == ZINK_DESCRIPTOR_TYPE_UBO && !start)
      dd_lazy(ctx)->push_state_changed[shader == PIPE_SHADER_COMPUTE] = true;
   else {
      if (zink_screen(ctx->base.screen)->compact_descriptors && type > ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW)
         type -= ZINK_DESCRIPTOR_COMPACT;
      dd_lazy(ctx)->state_changed[shader == PIPE_SHADER_COMPUTE] |= BITFIELD_BIT(type);
   }
}

void
zink_batch_descriptor_deinit_lazy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!bs->dd)
      return;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   if (screen->info.have_KHR_descriptor_update_template) {
      for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
         hash_table_foreach(&bdd->pools[i], entry) {
            struct zink_descriptor_pool *pool = (void*)entry->data;
            VKSCR(DestroyDescriptorPool)(screen->dev, pool->pool, NULL);
         }
      }
      if (bdd->push_pool[0])
         VKSCR(DestroyDescriptorPool)(screen->dev, bdd->push_pool[0]->pool, NULL);
      if (bdd->push_pool[1])
         VKSCR(DestroyDescriptorPool)(screen->dev, bdd->push_pool[1]->pool, NULL);
   }
   ralloc_free(bs->dd);
}

static void
pool_destroy(struct zink_screen *screen, struct zink_descriptor_pool *pool)
{
   VKSCR(DestroyDescriptorPool)(screen->dev, pool->pool, NULL);
   ralloc_free(pool);
}

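/* called on batch reset: rewind pools whose pool keys are still referenced by
 * a program so their sets can be recycled, destroy pools that no longer are,
 * and flush out anything queued on the overflow list
 */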
void
zink_batch_descriptor_reset_lazy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!screen->info.have_KHR_descriptor_update_template)
      return;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      hash_table_foreach(&bdd->pools[i], entry) {
         const struct zink_descriptor_pool_key *key = entry->key;
         struct zink_descriptor_pool *pool = (void*)entry->data;
         if (key->use_count)
            pool->set_idx = 0;
         else {
            pool_destroy(screen, pool);
            _mesa_hash_table_remove(&bdd->pools[i], entry);
         }
      }
   }
   for (unsigned i = 0; i < 2; i++) {
      bdd->pg[i] = NULL;
      if (bdd->push_pool[i])
         bdd->push_pool[i]->set_idx = 0;
   }
   while (util_dynarray_num_elements(&bdd->overflowed_pools, struct zink_descriptor_pool*)) {
      struct zink_descriptor_pool *pool = util_dynarray_pop(&bdd->overflowed_pools, struct zink_descriptor_pool*);
      pool_destroy(screen, pool);
   }
}

bool
zink_batch_descriptor_init_lazy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   bs->dd = (void*)rzalloc(bs, struct zink_batch_descriptor_data_lazy);
   if (!bs->dd)
      return false;
   if (!screen->info.have_KHR_descriptor_update_template)
      return true;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      if (!_mesa_hash_table_init(&bdd->pools[i], bs->dd, _mesa_hash_pointer, _mesa_key_pointer_equal))
         return false;
   }
   util_dynarray_init(&bdd->overflowed_pools, bs->dd);
   if (!screen->info.have_KHR_push_descriptor) {
      bdd->push_pool[0] = create_push_pool(screen, bdd, false, false);
      bdd->push_pool[1] = create_push_pool(screen, bdd, true, false);
   }
   return true;
}

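/* one push-template entry per shader stage, each pointing at that stage's
 * UBO slot 0 in the context's descriptor info
 */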
static void
init_push_template_entry(VkDescriptorUpdateTemplateEntry *entry, unsigned i)
{
   entry->dstBinding = tgsi_processor_to_shader_stage(i);
   entry->descriptorCount = 1;
   entry->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   entry->offset = offsetof(struct zink_context, di.ubos[i][0]);
   entry->stride = sizeof(VkDescriptorBufferInfo);
}

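/* context-level setup: build the push templates for all gfx stages, compute,
 * and the fbfetch input attachment, then create the shared push and dummy
 * set layouts
 */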
bool
zink_descriptors_init_lazy(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   ctx->dd = (void*)rzalloc(ctx, struct zink_descriptor_data_lazy);
   if (!ctx->dd)
      return false;

   if (zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_NOTEMPLATES)
      printf("ZINK: CACHED/NOTEMPLATES DESCRIPTORS\n");
   else if (screen->info.have_KHR_descriptor_update_template) {
      for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
         VkDescriptorUpdateTemplateEntry *entry = &dd_lazy(ctx)->push_entries[i];
         init_push_template_entry(entry, i);
      }
      init_push_template_entry(&dd_lazy(ctx)->compute_push_entry, PIPE_SHADER_COMPUTE);
      VkDescriptorUpdateTemplateEntry *entry = &dd_lazy(ctx)->push_entries[ZINK_SHADER_COUNT]; //fbfetch
      entry->dstBinding = ZINK_FBFETCH_BINDING;
      entry->descriptorCount = 1;
      entry->descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
      entry->offset = offsetof(struct zink_context, di.fbfetch);
      entry->stride = sizeof(VkDescriptorImageInfo);
   }
   struct zink_descriptor_layout_key *layout_key;
   if (!zink_descriptor_util_push_layouts_get(ctx, ctx->dd->push_dsl, ctx->dd->push_layout_keys))
      return false;

   ctx->dd->dummy_dsl = zink_descriptor_util_layout_get(ctx, 0, NULL, 0, &layout_key);
   if (!ctx->dd->dummy_dsl)
      return false;

   return true;
}

void
zink_descriptors_deinit_lazy(struct zink_context *ctx)
{
   if (ctx->dd) {
      struct zink_screen *screen = zink_screen(ctx->base.screen);
      if (ctx->dd->dummy_pool)
         VKSCR(DestroyDescriptorPool)(screen->dev, ctx->dd->dummy_pool, NULL);
      if (ctx->dd->push_dsl[0])
         VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->push_dsl[0]->layout, NULL);
      if (ctx->dd->push_dsl[1])
         VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->push_dsl[1]->layout, NULL);
   }
   ralloc_free(ctx->dd);
}