/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
 */
#include "tgsi/tgsi_from_mesa.h"

#include "zink_context.h"
#include "zink_compiler.h"
#include "zink_descriptors.h"
#include "zink_program.h"
#include "zink_resource.h"
#include "zink_screen.h"

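/* The "lazy" descriptor manager: rather than hashing and caching descriptor
 * set contents across batches, sets are bump-allocated from per-batch pools
 * and rewritten through descriptor update templates whenever their state is
 * dirtied. MAX_LAZY_DESCRIPTORS below caps how many sets a single pool hands
 * out before it is retired to the overflow list and replaced.
 */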
#define MAX_LAZY_DESCRIPTORS (ZINK_DEFAULT_MAX_DESCS / 10)

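/* context-wide lazy-manager state: update-template entries for the push set
 * (one per gfx stage plus fbfetch, and one for compute) along with
 * per-pipeline (gfx/compute) dirty flags for the push set and the typed sets
 */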
struct zink_descriptor_data_lazy {
   struct zink_descriptor_data base;
   VkDescriptorUpdateTemplateEntry push_entries[PIPE_SHADER_TYPES]; //gfx+fbfetch
   VkDescriptorUpdateTemplateEntry compute_push_entry;
   bool push_state_changed[2]; //gfx, compute
   uint8_t state_changed[2]; //gfx, compute
};

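/* a simple bump allocator for descriptor sets: sets are preallocated in
 * chunks from 'pool' and handed out by incrementing set_idx; a batch-state
 * reset recycles the whole pool by rewinding set_idx to zero
 */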
struct zink_descriptor_pool {
   VkDescriptorPool pool;
   VkDescriptorSet sets[MAX_LAZY_DESCRIPTORS];
   unsigned set_idx;
   unsigned sets_alloc;
};

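/* per-batch-state descriptor data: pools keyed by descriptor layout, pools
 * that overflowed and await destruction on reset, and the most recently
 * bound sets/layouts/programs (indexed by [is_compute]) used to skip
 * redundant updates and binds
 */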
struct zink_batch_descriptor_data_lazy {
   struct zink_batch_descriptor_data base;
   struct util_dynarray overflowed_pools;
   struct hash_table pools[ZINK_DESCRIPTOR_TYPES];
   struct zink_descriptor_pool *push_pool[2];
   struct zink_program *pg[2]; //gfx, compute
   uint32_t compat_id[2];
   VkDescriptorSetLayout dsl[2][ZINK_DESCRIPTOR_TYPES];
   VkDescriptorSet sets[2][ZINK_DESCRIPTOR_TYPES + 1];
   unsigned push_usage[2];
   bool has_fbfetch;
};

ALWAYS_INLINE static struct zink_descriptor_data_lazy *
dd_lazy(struct zink_context *ctx)
{
   return (struct zink_descriptor_data_lazy*)ctx->dd;
}

ALWAYS_INLINE static struct zink_batch_descriptor_data_lazy *
bdd_lazy(struct zink_batch_state *bs)
{
   return (struct zink_batch_descriptor_data_lazy*)bs->dd;
}

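/* fill in one update-template entry for a binding: 'offset' addresses the
 * context's pre-filled descriptor info arrays (ctx->di.*), so that
 * vkUpdateDescriptorSetWithTemplate() can later read the descriptor data
 * straight out of the context (which is passed as the template's pData)
 */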
static void
init_template_entry(struct zink_shader *shader, enum zink_descriptor_type type,
                    unsigned idx, unsigned offset, VkDescriptorUpdateTemplateEntry *entry, unsigned *entry_idx, bool flatten_dynamic)
{
   int index = shader->bindings[type][idx].index;
   enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
   entry->dstArrayElement = 0;
   entry->dstBinding = shader->bindings[type][idx].binding;
   if (shader->bindings[type][idx].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC && flatten_dynamic)
      /* filter out DYNAMIC type here */
      entry->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   else
      entry->descriptorType = shader->bindings[type][idx].type;
   switch (shader->bindings[type][idx].type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      entry->descriptorCount = 1;
      entry->offset = offsetof(struct zink_context, di.ubos[stage][index + offset]);
      entry->stride = sizeof(VkDescriptorBufferInfo);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      entry->descriptorCount = shader->bindings[type][idx].size;
      entry->offset = offsetof(struct zink_context, di.textures[stage][index + offset]);
      entry->stride = sizeof(VkDescriptorImageInfo);
      break;
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      entry->descriptorCount = shader->bindings[type][idx].size;
      entry->offset = offsetof(struct zink_context, di.tbos[stage][index + offset]);
      entry->stride = sizeof(VkBufferView);
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      entry->descriptorCount = 1;
      entry->offset = offsetof(struct zink_context, di.ssbos[stage][index + offset]);
      entry->stride = sizeof(VkDescriptorBufferInfo);
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      entry->descriptorCount = shader->bindings[type][idx].size;
      entry->offset = offsetof(struct zink_context, di.images[stage][index + offset]);
      entry->stride = sizeof(VkDescriptorImageInfo);
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      entry->descriptorCount = shader->bindings[type][idx].size;
      entry->offset = offsetof(struct zink_context, di.texel_images[stage][index + offset]);
      entry->stride = sizeof(VkBufferView);
      break;
   default:
      unreachable("unknown type");
   }
   (*entry_idx)++;
}

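/* per-program setup: dynamic UBOs are folded into the shared push set
 * (set 0), one layout/template pair is created for each descriptor type the
 * program uses, and unused set slots are filled with a dummy layout so that
 * set indices remain stable across programs
 */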
bool
zink_descriptor_program_init_lazy(struct zink_context *ctx, struct zink_program *pg)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkDescriptorSetLayoutBinding bindings[ZINK_DESCRIPTOR_TYPES][PIPE_SHADER_TYPES * 32];
   VkDescriptorUpdateTemplateEntry entries[ZINK_DESCRIPTOR_TYPES][PIPE_SHADER_TYPES * 32];
   unsigned num_bindings[ZINK_DESCRIPTOR_TYPES] = {0};
   uint8_t has_bindings = 0;
   unsigned push_count = 0;

   struct zink_shader **stages;
   if (pg->is_compute)
      stages = &((struct zink_compute_program*)pg)->shader;
   else {
      stages = ((struct zink_gfx_program*)pg)->shaders;
      if (stages[PIPE_SHADER_FRAGMENT]->nir->info.fs.uses_fbfetch_output) {
         zink_descriptor_util_init_fbfetch(ctx);
         push_count = 1;
         pg->dd->fbfetch = true;
      }
   }

   if (!pg->dd)
      pg->dd = (void*)rzalloc(pg, struct zink_program_descriptor_data);
   if (!pg->dd)
      return false;

   unsigned entry_idx[ZINK_DESCRIPTOR_TYPES] = {0};

   unsigned num_shaders = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
   bool have_push = screen->info.have_KHR_push_descriptor;
   for (int i = 0; i < num_shaders; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;

      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
      VkShaderStageFlagBits stage_flags = zink_shader_stage(stage);
      for (int j = 0; j < ZINK_DESCRIPTOR_TYPES; j++) {
         for (int k = 0; k < shader->num_bindings[j]; k++) {
            /* dynamic ubos handled in push */
            if (shader->bindings[j][k].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
               pg->dd->push_usage |= BITFIELD64_BIT(stage);

               push_count++;
               continue;
            }

            assert(num_bindings[j] < ARRAY_SIZE(bindings[j]));
            VkDescriptorSetLayoutBinding *binding = &bindings[j][num_bindings[j]];
            binding->binding = shader->bindings[j][k].binding;
            binding->descriptorType = shader->bindings[j][k].type;
            binding->descriptorCount = shader->bindings[j][k].size;
            binding->stageFlags = stage_flags;
            binding->pImmutableSamplers = NULL;

            enum zink_descriptor_size_index idx = zink_vktype_to_size_idx(shader->bindings[j][k].type);
            pg->dd->sizes[idx].descriptorCount += shader->bindings[j][k].size;
            pg->dd->sizes[idx].type = shader->bindings[j][k].type;
            switch (shader->bindings[j][k].type) {
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
               init_template_entry(shader, j, k, 0, &entries[j][entry_idx[j]], &entry_idx[j], screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY);
               break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
               for (unsigned l = 0; l < shader->bindings[j][k].size; l++)
                  init_template_entry(shader, j, k, l, &entries[j][entry_idx[j]], &entry_idx[j], screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY);
               break;
            default:
               break;
            }
            num_bindings[j]++;
            has_bindings |= BITFIELD_BIT(j);
         }
      }
      pg->dd->bindless |= shader->bindless;
   }
   if (pg->dd->bindless)
      zink_descriptors_init_bindless(ctx);
   pg->dd->binding_usage = has_bindings;
   if (!has_bindings && !push_count) {
      ralloc_free(pg->dd);
      pg->dd = NULL;

      pg->layout = zink_pipeline_layout_create(screen, pg, &pg->compat_id);
      return !!pg->layout;
   }

   pg->dsl[pg->num_dsl++] = push_count ? ctx->dd->push_dsl[pg->is_compute]->layout : ctx->dd->dummy_dsl->layout;
   if (has_bindings) {
      u_foreach_bit(type, has_bindings) {
         for (unsigned i = 0; i < type; i++) {
            /* push set is always 0 */
            if (!pg->dsl[i + 1]) {
               /* inject a null dsl */
               pg->dsl[pg->num_dsl++] = ctx->dd->dummy_dsl->layout;
               pg->dd->binding_usage |= BITFIELD_BIT(i);
            }
         }
         pg->dd->layouts[pg->num_dsl] = zink_descriptor_util_layout_get(ctx, type, bindings[type], num_bindings[type], &pg->dd->layout_key[type]);
         pg->dd->layout_key[type]->use_count++;
         pg->dsl[pg->num_dsl] = pg->dd->layouts[pg->num_dsl]->layout;
         pg->num_dsl++;
      }
      for (unsigned i = 0; i < ARRAY_SIZE(pg->dd->sizes); i++)
         pg->dd->sizes[i].descriptorCount *= screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ? MAX_LAZY_DESCRIPTORS : ZINK_DEFAULT_MAX_DESCS;
   }
   /* TODO: make this dynamic? */
   if (pg->dd->bindless) {
      pg->num_dsl = ZINK_DESCRIPTOR_BINDLESS + 1;
      pg->dsl[ZINK_DESCRIPTOR_BINDLESS] = ctx->dd->bindless_layout;
      for (unsigned i = 0; i < ZINK_DESCRIPTOR_BINDLESS; i++) {
         if (!pg->dsl[i]) {
            /* inject a null dsl */
            pg->dsl[i] = ctx->dd->dummy_dsl->layout;
            if (i != ZINK_DESCRIPTOR_TYPES)
               pg->dd->binding_usage |= BITFIELD_BIT(i);
         }
      }
   }

   pg->layout = zink_pipeline_layout_create(screen, pg, &pg->compat_id);
   if (!pg->layout)
      return false;
   if (!screen->info.have_KHR_descriptor_update_template || screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_NOTEMPLATES)
      return true;

   VkDescriptorUpdateTemplateCreateInfo template[ZINK_DESCRIPTOR_TYPES + 1] = {0};
   /* type of template */
   VkDescriptorUpdateTemplateType types[ZINK_DESCRIPTOR_TYPES + 1] = {VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET};
   if (have_push && screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY)
      types[0] = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR;

   /* number of descriptors in template */
   unsigned wd_count[ZINK_DESCRIPTOR_TYPES + 1];
   if (push_count)
      wd_count[0] = pg->is_compute ? 1 : (ZINK_SHADER_COUNT + !!ctx->dd->has_fbfetch);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
      wd_count[i + 1] = pg->dd->layout_key[i] ? pg->dd->layout_key[i]->num_descriptors : 0;

   VkDescriptorUpdateTemplateEntry *push_entries[2] = {
      dd_lazy(ctx)->push_entries,
      &dd_lazy(ctx)->compute_push_entry,
   };
   for (unsigned i = 0; i < pg->num_dsl; i++) {
      bool is_push = i == 0;
      /* no need for empty templates */
      if (pg->dsl[i] == ctx->dd->dummy_dsl->layout ||
          pg->dsl[i] == ctx->dd->bindless_layout ||
          (!is_push && pg->dd->layouts[i]->desc_template))
         continue;
      template[i].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO;
      assert(wd_count[i]);
      template[i].descriptorUpdateEntryCount = wd_count[i];
      if (is_push)
         template[i].pDescriptorUpdateEntries = push_entries[pg->is_compute];
      else
         template[i].pDescriptorUpdateEntries = entries[i - 1];
      template[i].templateType = types[i];
      template[i].descriptorSetLayout = pg->dsl[i];
      template[i].pipelineBindPoint = pg->is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS;
      template[i].pipelineLayout = pg->layout;
      template[i].set = i;
      VkDescriptorUpdateTemplateKHR t;
      if (VKSCR(CreateDescriptorUpdateTemplate)(screen->dev, &template[i], NULL, &t) != VK_SUCCESS)
         return false;
      if (is_push)
         pg->dd->push_template = t;
      else
         pg->dd->layouts[i]->desc_template = t;
   }
   return true;
}

void
zink_descriptor_program_deinit_lazy(struct zink_screen *screen, struct zink_program *pg)
{
   for (unsigned i = 0; pg->num_dsl && i < ZINK_DESCRIPTOR_TYPES; i++) {
      if (pg->dd->layout_key[i])
         pg->dd->layout_key[i]->use_count--;
   }
   if (pg->dd && pg->dd->push_template)
      VKSCR(DestroyDescriptorUpdateTemplate)(screen->dev, pg->dd->push_template, NULL);
   ralloc_free(pg->dd);
}

static VkDescriptorPool
create_pool(struct zink_screen *screen, unsigned num_type_sizes, VkDescriptorPoolSize *sizes, unsigned flags)
{
   VkDescriptorPool pool;
   VkDescriptorPoolCreateInfo dpci = {0};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = num_type_sizes;
   dpci.flags = flags;
   dpci.maxSets = MAX_LAZY_DESCRIPTORS;
   if (VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &pool) != VK_SUCCESS) {
      debug_printf("vkCreateDescriptorPool failed\n");
      return VK_NULL_HANDLE;
   }
   return pool;
}

static struct zink_descriptor_pool *
get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute);

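/* ensure 'pool' still has sets available: grow the preallocated set array
 * geometrically (in chunks of at most 100) up to MAX_LAZY_DESCRIPTORS; once
 * the cap is reached, retire the pool to the overflow list and start over
 * with a fresh pool
 */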
static struct zink_descriptor_pool *
check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct hash_entry *he, struct zink_program *pg,
                 enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   /* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
   if (pool->set_idx == pool->sets_alloc) {
      unsigned sets_to_alloc = MIN2(MIN2(MAX2(pool->sets_alloc * 10, 10), MAX_LAZY_DESCRIPTORS) - pool->sets_alloc, 100);
      if (!sets_to_alloc) {
         /* overflowed pool: queue for deletion on next reset */
         util_dynarray_append(&bdd->overflowed_pools, struct zink_descriptor_pool*, pool);
         _mesa_hash_table_remove(&bdd->pools[type], he);
         return get_descriptor_pool_lazy(ctx, pg, type, bdd, is_compute);
      }
      if (!zink_descriptor_util_alloc_sets(screen, pg->dsl[type + 1],
                                           pool->pool, &pool->sets[pool->sets_alloc], sets_to_alloc))
         return NULL;
      pool->sets_alloc += sets_to_alloc;
   }
   return pool;
}

static struct zink_descriptor_pool *
create_push_pool(struct zink_screen *screen, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute, bool has_fbfetch)
{
   struct zink_descriptor_pool *pool = rzalloc(bdd, struct zink_descriptor_pool);
   VkDescriptorPoolSize sizes[2];
   sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   if (is_compute)
      sizes[0].descriptorCount = MAX_LAZY_DESCRIPTORS;
   else {
      sizes[0].descriptorCount = ZINK_SHADER_COUNT * MAX_LAZY_DESCRIPTORS;
      sizes[1].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
      sizes[1].descriptorCount = MAX_LAZY_DESCRIPTORS;
   }
   pool->pool = create_pool(screen, !is_compute && has_fbfetch ? 2 : 1, sizes, 0);
   return pool;
}

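/* push-set variant of check_pool_alloc(): additionally recreates the pool
 * whenever the context's fbfetch state no longer matches the batch's, since
 * fbfetch changes the push set's layout
 */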
static struct zink_descriptor_pool *
check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   /* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
   if (pool->set_idx == pool->sets_alloc || unlikely(ctx->dd->has_fbfetch != bdd->has_fbfetch)) {
      unsigned sets_to_alloc = MIN2(MIN2(MAX2(pool->sets_alloc * 10, 10), MAX_LAZY_DESCRIPTORS) - pool->sets_alloc, 100);
      if (!sets_to_alloc || unlikely(ctx->dd->has_fbfetch != bdd->has_fbfetch)) {
         /* overflowed pool: queue for deletion on next reset */
         util_dynarray_append(&bdd->overflowed_pools, struct zink_descriptor_pool*, pool);
         bdd->push_pool[is_compute] = create_push_pool(screen, bdd, is_compute, ctx->dd->has_fbfetch);
         bdd->has_fbfetch = ctx->dd->has_fbfetch;
         return check_push_pool_alloc(ctx, bdd->push_pool[is_compute], bdd, is_compute);
      }
      if (!zink_descriptor_util_alloc_sets(screen, ctx->dd->push_dsl[is_compute]->layout,
                                           pool->pool, &pool->sets[pool->sets_alloc], sets_to_alloc))
         return NULL;
      pool->sets_alloc += sets_to_alloc;
   }
   return pool;
}

static struct zink_descriptor_pool *
get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct hash_entry *he = _mesa_hash_table_search(&bdd->pools[type], pg->dd->layout_key[type]);
   struct zink_descriptor_pool *pool;
   if (he) {
      pool = he->data;
      return check_pool_alloc(ctx, pool, he, pg, type, bdd, is_compute);
   }
   pool = rzalloc(bdd, struct zink_descriptor_pool);
   if (!pool)
      return NULL;
   unsigned idx = zink_descriptor_type_to_size_idx(type);
   VkDescriptorPoolSize *size = &pg->dd->sizes[idx];
   /* this is a sampler/image set with no images, only texel buffers */
   if (!size->descriptorCount)
      size++;
   pool->pool = create_pool(screen, zink_descriptor_program_num_sizes(pg, type), size, 0);
   if (!pool->pool) {
      ralloc_free(pool);
      return NULL;
   }
   _mesa_hash_table_insert(&bdd->pools[type], pg->dd->layout_key[type], pool);
   return check_pool_alloc(ctx, pool, he, pg, type, bdd, is_compute);
}

ALWAYS_INLINE static VkDescriptorSet
get_descriptor_set_lazy(struct zink_descriptor_pool *pool)
{
   if (!pool)
      return VK_NULL_HANDLE;

   assert(pool->set_idx < pool->sets_alloc);
   return pool->sets[pool->set_idx++];
}

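/* grab one fresh set for every changed descriptor type; types with no real
 * layout just get the dummy set
 */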
static bool
populate_sets(struct zink_context *ctx, struct zink_batch_descriptor_data_lazy *bdd,
              struct zink_program *pg, uint8_t *changed_sets, VkDescriptorSet *sets)
{
   u_foreach_bit(type, *changed_sets) {
      if (pg->dd->layout_key[type]) {
         struct zink_descriptor_pool *pool = get_descriptor_pool_lazy(ctx, pg, type, bdd, pg->is_compute);
         sets[type] = get_descriptor_set_lazy(pool);
      } else
         sets[type] = ctx->dd->dummy_set;
      if (!sets[type])
         return false;
   }
   return true;
}

void
zink_descriptor_set_update_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, VkDescriptorSet set)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VKCTX(UpdateDescriptorSetWithTemplate)(screen->dev, set, pg->dd->layouts[type + 1]->desc_template, ctx);
}

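/* update and bind the non-push descriptor sets: types in 'changed_sets' get
 * freshly allocated and templated sets, while types only in 'bind_sets'
 * rebind the sets already recorded for this batch (needed e.g. after a
 * pipeline layout compatibility break)
 */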
void
zink_descriptors_update_lazy_masked(struct zink_context *ctx, bool is_compute, uint8_t changed_sets, uint8_t bind_sets)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = ctx->batch.state;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
   VkDescriptorSet desc_sets[ZINK_DESCRIPTOR_TYPES];
   if (!pg->dd->binding_usage || (!changed_sets && !bind_sets))
      return;

   if (!populate_sets(ctx, bdd, pg, &changed_sets, desc_sets)) {
      debug_printf("ZINK: couldn't get descriptor sets!\n");
      return;
   }
   /* no flushing allowed */
   assert(ctx->batch.state == bs);

   u_foreach_bit(type, changed_sets) {
      assert(type + 1 < pg->num_dsl);
      if (pg->dd->layout_key[type]) {
         VKSCR(UpdateDescriptorSetWithTemplate)(screen->dev, desc_sets[type], pg->dd->layouts[type + 1]->desc_template, ctx);
         VKSCR(CmdBindDescriptorSets)(bs->cmdbuf,
                                      is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                      /* set index incremented by 1 to account for push set */
                                      pg->layout, type + 1, 1, &desc_sets[type],
                                      0, NULL);
         bdd->sets[is_compute][type + 1] = desc_sets[type];
      }
   }
   u_foreach_bit(type, bind_sets & ~changed_sets) {
      if (!pg->dd->layout_key[type])
         bdd->sets[is_compute][type + 1] = ctx->dd->dummy_set;
      assert(bdd->sets[is_compute][type + 1]);
      VKSCR(CmdBindDescriptorSets)(bs->cmdbuf,
                                   is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                   /* set index incremented by 1 to account for push set */
                                   pg->layout, type + 1, 1, &bdd->sets[is_compute][type + 1],
                                   0, NULL);
   }
}

/* only called by cached manager for fbfetch handling */
VkDescriptorSet
zink_descriptors_alloc_lazy_push(struct zink_context *ctx)
{
   struct zink_batch_state *bs = ctx->batch.state;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkDescriptorSet push_set = VK_NULL_HANDLE;
   if (!bdd->push_pool[0]) {
      bdd->push_pool[0] = create_push_pool(screen, bdd, false, true);
      bdd->has_fbfetch = true;
   }
   struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[0], bdd, false);
   push_set = get_descriptor_set_lazy(pool);
   if (!push_set)
      mesa_loge("ZINK: failed to get push descriptor set!");
   return push_set;
}

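/* per-draw/dispatch entry point: work out which sets are dirty for the
 * current program, update the push set (via push descriptors when available,
 * otherwise through a pooled set), then update/bind the remaining typed sets
 * and, if used, the bindless set
 */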
void
zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
{
   struct zink_batch_state *bs = ctx->batch.state;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   bool have_KHR_push_descriptor = screen->info.have_KHR_push_descriptor;

   bool batch_changed = !bdd->pg[is_compute];
   if (batch_changed) {
      /* update all sets and bind null sets */
      dd_lazy(ctx)->state_changed[is_compute] = pg->dd->binding_usage;
      dd_lazy(ctx)->push_state_changed[is_compute] = !!pg->dd->push_usage;
   }

   if (pg != bdd->pg[is_compute]) {
      /* if we don't already know that we have to update all sets,
       * check to see if any dsls changed
       *
       * also always update the dsl pointers on program change
       */
      for (unsigned i = 0; i < ARRAY_SIZE(bdd->dsl[is_compute]); i++) {
         /* push set is already detected, start at 1 */
         if (bdd->dsl[is_compute][i] != pg->dsl[i + 1])
            dd_lazy(ctx)->state_changed[is_compute] |= BITFIELD_BIT(i);
         bdd->dsl[is_compute][i] = pg->dsl[i + 1];
      }
      dd_lazy(ctx)->push_state_changed[is_compute] |= bdd->push_usage[is_compute] != pg->dd->push_usage;
      bdd->push_usage[is_compute] = pg->dd->push_usage;
   }

   uint8_t changed_sets = pg->dd->binding_usage & dd_lazy(ctx)->state_changed[is_compute];
   bool need_push = pg->dd->push_usage &&
                    (dd_lazy(ctx)->push_state_changed[is_compute] || batch_changed);
   VkDescriptorSet push_set = VK_NULL_HANDLE;
   if (need_push && !have_KHR_push_descriptor) {
      struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[pg->is_compute], bdd, pg->is_compute);
      push_set = get_descriptor_set_lazy(pool);
      if (!push_set) {
         mesa_loge("ZINK: failed to get push descriptor set!");
         /* just jam something in to avoid a hang */
         push_set = ctx->dd->dummy_set;
      }
   }
   /*
    * when binding a pipeline, the pipeline can correctly access any previously bound
    * descriptor sets which were bound with compatible pipeline layouts
    * VK 14.2.2
    */
   uint8_t bind_sets = bdd->pg[is_compute] && bdd->compat_id[is_compute] == pg->compat_id ? 0 : pg->dd->binding_usage;
   if (pg->dd->push_usage && (dd_lazy(ctx)->push_state_changed[is_compute] || bind_sets)) {
      if (have_KHR_push_descriptor) {
         if (dd_lazy(ctx)->push_state_changed[is_compute])
            VKCTX(CmdPushDescriptorSetWithTemplateKHR)(bs->cmdbuf, pg->dd->push_template,
                                                       pg->layout, 0, ctx);
      } else {
         if (dd_lazy(ctx)->push_state_changed[is_compute]) {
            VKCTX(UpdateDescriptorSetWithTemplate)(screen->dev, push_set, pg->dd->push_template, ctx);
            bdd->sets[is_compute][0] = push_set;
         }
         assert(push_set || bdd->sets[is_compute][0]);
         VKCTX(CmdBindDescriptorSets)(bs->cmdbuf,
                                      is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                      pg->layout, 0, 1, push_set ? &push_set : &bdd->sets[is_compute][0],
                                      0, NULL);
      }
      dd_lazy(ctx)->push_state_changed[is_compute] = false;
   } else if (dd_lazy(ctx)->push_state_changed[is_compute] || bind_sets) {
      VKCTX(CmdBindDescriptorSets)(bs->cmdbuf,
                                   is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                   pg->layout, 0, 1, &ctx->dd->dummy_set,
                                   0, NULL);
      dd_lazy(ctx)->push_state_changed[is_compute] = false;
   }
   zink_descriptors_update_lazy_masked(ctx, is_compute, changed_sets, bind_sets);
   if (pg->dd->bindless && unlikely(!ctx->dd->bindless_bound)) {
      VKCTX(CmdBindDescriptorSets)(ctx->batch.state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                   pg->layout, ZINK_DESCRIPTOR_BINDLESS, 1, &ctx->dd->bindless_set,
                                   0, NULL);
      ctx->dd->bindless_bound = true;
   }
   bdd->pg[is_compute] = pg;
   ctx->dd->pg[is_compute] = pg;
   bdd->compat_id[is_compute] = pg->compat_id;
   dd_lazy(ctx)->state_changed[is_compute] = false;
}

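/* mark descriptor state dirty for a range of slots; UBO slot 0 lives in the
 * push set, so invalidating it dirties the push state instead of a typed set
 */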
void
zink_context_invalidate_descriptor_state_lazy(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned start, unsigned count)
{
   if (type == ZINK_DESCRIPTOR_TYPE_UBO && !start)
      dd_lazy(ctx)->push_state_changed[shader == PIPE_SHADER_COMPUTE] = true;
   else
      dd_lazy(ctx)->state_changed[shader == PIPE_SHADER_COMPUTE] |= BITFIELD_BIT(type);
}

void
zink_batch_descriptor_deinit_lazy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!bs->dd)
      return;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   if (screen->info.have_KHR_descriptor_update_template) {
      for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
         hash_table_foreach(&bdd->pools[i], entry) {
            struct zink_descriptor_pool *pool = (void*)entry->data;
            VKSCR(DestroyDescriptorPool)(screen->dev, pool->pool, NULL);
         }
      }
      if (bdd->push_pool[0])
         VKSCR(DestroyDescriptorPool)(screen->dev, bdd->push_pool[0]->pool, NULL);
      if (bdd->push_pool[1])
         VKSCR(DestroyDescriptorPool)(screen->dev, bdd->push_pool[1]->pool, NULL);
   }
   ralloc_free(bs->dd);
}

static void
pool_destroy(struct zink_screen *screen, struct zink_descriptor_pool *pool)
{
   VKSCR(DestroyDescriptorPool)(screen->dev, pool->pool, NULL);
   ralloc_free(pool);
}

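/* called on batch-state reset: pools whose layouts are still referenced are
 * rewound so their sets can be reused; unused pools and any overflowed pools
 * are destroyed
 */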
void
zink_batch_descriptor_reset_lazy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!screen->info.have_KHR_descriptor_update_template)
      return;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      hash_table_foreach(&bdd->pools[i], entry) {
         const struct zink_descriptor_layout_key *key = entry->key;
         struct zink_descriptor_pool *pool = (void*)entry->data;
         if (key->use_count)
            pool->set_idx = 0;
         else {
            pool_destroy(screen, pool);
            _mesa_hash_table_remove(&bdd->pools[i], entry);
         }
      }
   }
   for (unsigned i = 0; i < 2; i++) {
      bdd->pg[i] = NULL;
      if (bdd->push_pool[i])
         bdd->push_pool[i]->set_idx = 0;
   }
   while (util_dynarray_num_elements(&bdd->overflowed_pools, struct zink_descriptor_pool*)) {
      struct zink_descriptor_pool *pool = util_dynarray_pop(&bdd->overflowed_pools, struct zink_descriptor_pool*);
      pool_destroy(screen, pool);
   }
}

bool
zink_batch_descriptor_init_lazy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   bs->dd = (void*)rzalloc(bs, struct zink_batch_descriptor_data_lazy);
   if (!bs->dd)
      return false;
   if (!screen->info.have_KHR_descriptor_update_template)
      return true;
   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      if (!_mesa_hash_table_init(&bdd->pools[i], bs->dd, _mesa_hash_pointer, _mesa_key_pointer_equal))
         return false;
   }
   util_dynarray_init(&bdd->overflowed_pools, bs->dd);
   if (!screen->info.have_KHR_push_descriptor) {
      bdd->push_pool[0] = create_push_pool(screen, bdd, false, false);
      bdd->push_pool[1] = create_push_pool(screen, bdd, true, false);
   }
   return true;
}

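/* push-set template entry for one shader stage: binds the stage's UBO slot 0
 * (read from ctx->di.ubos[stage][0]) to that stage's binding in the push set
 */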
static void
init_push_template_entry(VkDescriptorUpdateTemplateEntry *entry, unsigned i)
{
   entry->dstBinding = tgsi_processor_to_shader_stage(i);
   entry->descriptorCount = 1;
   entry->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   entry->offset = offsetof(struct zink_context, di.ubos[i][0]);
   entry->stride = sizeof(VkDescriptorBufferInfo);
}

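/* context-level init: set up the push-set template entries (one per gfx
 * stage, plus fbfetch and compute), create the push and dummy set layouts,
 * and allocate the dummy set used to fill unused set slots
 */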
bool
zink_descriptors_init_lazy(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   ctx->dd = (void*)rzalloc(ctx, struct zink_descriptor_data_lazy);
   if (!ctx->dd)
      return false;

   if (screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_NOTEMPLATES)
      printf("ZINK: CACHED/NOTEMPLATES DESCRIPTORS\n");
   else if (screen->info.have_KHR_descriptor_update_template) {
      for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
         VkDescriptorUpdateTemplateEntry *entry = &dd_lazy(ctx)->push_entries[i];
         init_push_template_entry(entry, i);
      }
      init_push_template_entry(&dd_lazy(ctx)->compute_push_entry, PIPE_SHADER_COMPUTE);
      VkDescriptorUpdateTemplateEntry *entry = &dd_lazy(ctx)->push_entries[ZINK_SHADER_COUNT]; //fbfetch
      entry->dstBinding = ZINK_FBFETCH_BINDING;
      entry->descriptorCount = 1;
      entry->descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
      entry->offset = offsetof(struct zink_context, di.fbfetch);
      entry->stride = sizeof(VkDescriptorImageInfo);
      if (screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY)
         printf("ZINK: USING LAZY DESCRIPTORS\n");
   }
   struct zink_descriptor_layout_key *layout_key;
   if (!zink_descriptor_util_push_layouts_get(ctx, ctx->dd->push_dsl, ctx->dd->push_layout_keys))
      return false;

   ctx->dd->dummy_dsl = zink_descriptor_util_layout_get(ctx, 0, NULL, 0, &layout_key);
   if (!ctx->dd->dummy_dsl)
      return false;
   VkDescriptorPoolSize null_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1};
   ctx->dd->dummy_pool = create_pool(screen, 1, &null_size, 0);
   zink_descriptor_util_alloc_sets(screen, ctx->dd->dummy_dsl->layout,
                                   ctx->dd->dummy_pool, &ctx->dd->dummy_set, 1);
   zink_descriptor_util_init_null_set(ctx, ctx->dd->dummy_set);

   return true;
}

void
zink_descriptors_deinit_lazy(struct zink_context *ctx)
{
   if (ctx->dd) {
      struct zink_screen *screen = zink_screen(ctx->base.screen);
      if (ctx->dd->dummy_pool)
         VKSCR(DestroyDescriptorPool)(screen->dev, ctx->dd->dummy_pool, NULL);
      if (ctx->dd->push_dsl[0])
         VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->push_dsl[0]->layout, NULL);
      if (ctx->dd->push_dsl[1])
         VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->push_dsl[1]->layout, NULL);
   }
   ralloc_free(ctx->dd);
}