1 /*
2  * Copyright © 2020 Mike Blumenkrantz
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
25  */
26 
27 #include "tgsi/tgsi_from_mesa.h"
28 
29 
30 
31 #include "zink_context.h"
32 #include "zink_descriptors.h"
33 #include "zink_program.h"
34 #include "zink_render_pass.h"
35 #include "zink_resource.h"
36 #include "zink_screen.h"
37 
38 #define XXH_INLINE_ALL
39 #include "util/xxhash.h"
40 
41 
42 struct zink_descriptor_pool {
43    struct pipe_reference reference;
44    enum zink_descriptor_type type;
45    struct hash_table *desc_sets;
46    struct hash_table *free_desc_sets;
47    struct util_dynarray alloc_desc_sets;
48    const struct zink_descriptor_pool_key *key;
49    VkDescriptorPool descpool;
50    unsigned num_resources;
51    unsigned num_sets_allocated;
52    simple_mtx_t mtx;
53 };
54 
55 struct zink_descriptor_set {
56    struct zink_descriptor_pool *pool;
57    struct pipe_reference reference; //incremented for batch usage
58    VkDescriptorSet desc_set;
59    uint32_t hash;
60    bool invalid;
61    bool punted;
62    bool recycled;
63    uint8_t compacted; //bitmask of zink_descriptor_type
64    struct zink_descriptor_state_key key;
65    struct zink_batch_usage *batch_uses;
66 #ifndef NDEBUG
67    /* for extra debug asserts */
68    unsigned num_resources;
69 #endif
70    union {
71       struct zink_resource_object **res_objs;
72       struct {
73          struct zink_descriptor_surface *surfaces;
74          struct zink_sampler_state **sampler_states;
75       };
76    };
77 };
78 
79 union zink_program_descriptor_refs {
80    struct zink_resource **res;
81    struct zink_descriptor_surface *dsurf;
82    struct {
83       struct zink_descriptor_surface *dsurf;
84       struct zink_sampler_state **sampler_state;
85    } sampler;
86 };
87 
88 struct zink_program_descriptor_data_cached {
89    struct zink_program_descriptor_data base;
90    struct zink_descriptor_pool *pool[ZINK_DESCRIPTOR_TYPES];
91    struct zink_descriptor_set *last_set[ZINK_DESCRIPTOR_TYPES];
92    unsigned num_refs[ZINK_DESCRIPTOR_TYPES];
93    union zink_program_descriptor_refs *refs[ZINK_DESCRIPTOR_TYPES];
94    unsigned cache_misses[ZINK_DESCRIPTOR_TYPES];
95 };
96 
97 
98 static inline struct zink_program_descriptor_data_cached *
99 pdd_cached(struct zink_program *pg)
100 {
101    return (struct zink_program_descriptor_data_cached*)pg->dd;
102 }
103 
104 static bool
105 batch_add_desc_set(struct zink_batch *batch, struct zink_descriptor_set *zds)
106 {
107    if (zink_batch_usage_matches(zds->batch_uses, batch->state) ||
108        !batch_ptr_add_usage(batch, batch->state->dd->desc_sets, zds))
109       return false;
110    pipe_reference(NULL, &zds->reference);
111    pipe_reference(NULL, &zds->pool->reference);
112    zink_batch_usage_set(&zds->batch_uses, batch->state);
113    return true;
114 }
115 
116 static void
117 debug_describe_zink_descriptor_pool(char *buf, const struct zink_descriptor_pool *ptr)
118 {
119    sprintf(buf, "zink_descriptor_pool");
120 }
121 
122 static inline uint32_t
123 get_sampler_view_hash(const struct zink_sampler_view *sampler_view)
124 {
125    if (!sampler_view)
126       return 0;
127    return sampler_view->base.target == PIPE_BUFFER ?
128           sampler_view->buffer_view->hash : sampler_view->image_view->hash;
129 }
130 
131 static inline uint32_t
132 get_image_view_hash(const struct zink_image_view *image_view)
133 {
134    if (!image_view || !image_view->base.resource)
135       return 0;
136    return image_view->base.resource->target == PIPE_BUFFER ?
137           image_view->buffer_view->hash : image_view->surface->hash;
138 }
139 
140 uint32_t
141 zink_get_sampler_view_hash(struct zink_context *ctx, struct zink_sampler_view *sampler_view, bool is_buffer)
142 {
143    return get_sampler_view_hash(sampler_view) ? get_sampler_view_hash(sampler_view) :
144           (is_buffer ? zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view :
145                        zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
146 }
147 
148 uint32_t
149 zink_get_image_view_hash(struct zink_context *ctx, struct zink_image_view *image_view, bool is_buffer)
150 {
151    return get_image_view_hash(image_view) ? get_image_view_hash(image_view) :
152           (is_buffer ? zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view :
153                        zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
154 }
155 
156 #ifndef NDEBUG
157 static uint32_t
158 get_descriptor_surface_hash(struct zink_context *ctx, struct zink_descriptor_surface *dsurf)
159 {
160    return dsurf->is_buffer ? (dsurf->bufferview ? dsurf->bufferview->hash : zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view) :
161                              (dsurf->surface ? dsurf->surface->hash : zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
162 }
163 #endif
164 
165 static bool
166 desc_state_equal(const void *a, const void *b)
167 {
168    const struct zink_descriptor_state_key *a_k = (void*)a;
169    const struct zink_descriptor_state_key *b_k = (void*)b;
170 
171    for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
172       if (a_k->exists[i] != b_k->exists[i])
173          return false;
174       if (a_k->exists[i] && b_k->exists[i] &&
175           a_k->state[i] != b_k->state[i])
176          return false;
177    }
178    return true;
179 }
180 
181 static uint32_t
182 desc_state_hash(const void *key)
183 {
184    const struct zink_descriptor_state_key *d_key = (void*)key;
185    uint32_t hash = 0;
186    bool first = true;
187    for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
188       if (d_key->exists[i]) {
189          if (!first)
190             hash ^= d_key->state[i];
191          else
192             hash = d_key->state[i];
193          first = false;
194       }
195    }
196    return hash;
197 }
198 
199 static void
200 pop_desc_set_ref(struct zink_descriptor_set *zds, struct util_dynarray *refs)
201 {
202    size_t size = sizeof(struct zink_descriptor_reference);
203    unsigned num_elements = refs->size / size;
204    for (unsigned i = 0; i < num_elements; i++) {
205       struct zink_descriptor_reference *ref = util_dynarray_element(refs, struct zink_descriptor_reference, i);
206       if (&zds->invalid == ref->invalid) {
207          memcpy(util_dynarray_element(refs, struct zink_descriptor_reference, i),
208                 util_dynarray_pop_ptr(refs, struct zink_descriptor_reference), size);
209          break;
210       }
211    }
212 }
213 
214 static void
215 descriptor_set_invalidate(struct zink_descriptor_set *zds)
216 {
217    zds->invalid = true;
218    unsigned idx = 0;
219    for (unsigned i = 0; i < zds->pool->key->layout->num_bindings; i++) {
220       for (unsigned j = 0; j < zds->pool->key->layout->bindings[i].descriptorCount; j++) {
221          switch (zds->pool->type) {
222          case ZINK_DESCRIPTOR_TYPE_UBO:
223          case ZINK_DESCRIPTOR_TYPE_SSBO:
224             if (zds->res_objs[idx])
225                pop_desc_set_ref(zds, &zds->res_objs[idx]->desc_set_refs.refs);
226             zds->res_objs[idx] = NULL;
227             break;
228          case ZINK_DESCRIPTOR_TYPE_IMAGE:
229             if (zds->surfaces[idx].is_buffer) {
230                if (zds->surfaces[idx].bufferview)
231                   pop_desc_set_ref(zds, &zds->surfaces[idx].bufferview->desc_set_refs.refs);
232                zds->surfaces[idx].bufferview = NULL;
233             } else {
234                if (zds->surfaces[idx].surface)
235                   pop_desc_set_ref(zds, &zds->surfaces[idx].surface->desc_set_refs.refs);
236                zds->surfaces[idx].surface = NULL;
237             }
238             break;
239          case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
240             if (zds->surfaces[idx].is_buffer) {
241                if (zds->surfaces[idx].bufferview)
242                   pop_desc_set_ref(zds, &zds->surfaces[idx].bufferview->desc_set_refs.refs);
243                zds->surfaces[idx].bufferview = NULL;
244             } else {
245                if (zds->surfaces[idx].surface)
246                   pop_desc_set_ref(zds, &zds->surfaces[idx].surface->desc_set_refs.refs);
247                zds->surfaces[idx].surface = NULL;
248             }
249             if (zds->sampler_states[idx])
250                pop_desc_set_ref(zds, &zds->sampler_states[idx]->desc_set_refs.refs);
251             zds->sampler_states[idx] = NULL;
252             break;
253          default:
254             break;
255          }
256          idx++;
257       }
258    }
259 }
260 
261 static void
262 descriptor_pool_clear(struct hash_table *ht)
263 {
264    hash_table_foreach(ht, entry) {
265       struct zink_descriptor_set *zds = entry->data;
266       descriptor_set_invalidate(zds);
267    }
268 }
269 
270 static void
271 descriptor_pool_free(struct zink_screen *screen, struct zink_descriptor_pool *pool)
272 {
273    if (!pool)
274       return;
275    if (pool->descpool)
276       VKSCR(DestroyDescriptorPool)(screen->dev, pool->descpool, NULL);
277 
278    simple_mtx_lock(&pool->mtx);
279    if (pool->desc_sets)
280       descriptor_pool_clear(pool->desc_sets);
281    if (pool->free_desc_sets)
282       descriptor_pool_clear(pool->free_desc_sets);
283    if (pool->desc_sets)
284       _mesa_hash_table_destroy(pool->desc_sets, NULL);
285    if (pool->free_desc_sets)
286       _mesa_hash_table_destroy(pool->free_desc_sets, NULL);
287 
288    simple_mtx_unlock(&pool->mtx);
289    util_dynarray_fini(&pool->alloc_desc_sets);
290    simple_mtx_destroy(&pool->mtx);
291    ralloc_free(pool);
292 }
293 
294 static void
295 descriptor_pool_delete(struct zink_context *ctx, struct zink_descriptor_pool *pool)
296 {
297    struct zink_screen *screen = zink_screen(ctx->base.screen);
298    if (!pool)
299       return;
300    _mesa_hash_table_remove_key(ctx->dd->descriptor_pools[pool->type], pool->key);
301    descriptor_pool_free(screen, pool);
302 }
303 
304 static struct zink_descriptor_pool *
305 descriptor_pool_create(struct zink_screen *screen, enum zink_descriptor_type type,
306                        const struct zink_descriptor_pool_key *pool_key)
307 {
308    struct zink_descriptor_pool *pool = rzalloc(NULL, struct zink_descriptor_pool);
309    if (!pool)
310       return NULL;
311    pipe_reference_init(&pool->reference, 1);
312    pool->type = type;
313    pool->key = pool_key;
314    simple_mtx_init(&pool->mtx, mtx_plain);
315    for (unsigned i = 0; i < pool_key->layout->num_bindings; i++) {
316        pool->num_resources += pool_key->layout->bindings[i].descriptorCount;
317    }
318    pool->desc_sets = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
319    if (!pool->desc_sets)
320       goto fail;
321 
322    pool->free_desc_sets = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
323    if (!pool->free_desc_sets)
324       goto fail;
325 
326    util_dynarray_init(&pool->alloc_desc_sets, NULL);
327 
328    VkDescriptorPoolCreateInfo dpci = {0};
329    dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
330    dpci.pPoolSizes = pool_key->sizes;
331    dpci.poolSizeCount = pool_key->num_type_sizes;
332    dpci.flags = 0;
333    dpci.maxSets = ZINK_DEFAULT_MAX_DESCS;
334    assert(pool_key->num_type_sizes);
335    VkResult result = VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &pool->descpool);
336    if (result != VK_SUCCESS) {
337       mesa_loge("ZINK: vkCreateDescriptorPool failed (%s)", vk_Result_to_str(result));
338       goto fail;
339    }
340 
341    return pool;
342 fail:
343    descriptor_pool_free(screen, pool);
344    return NULL;
345 }
346 
347 static VkDescriptorSetLayout
348 descriptor_layout_create(struct zink_screen *screen, enum zink_descriptor_type t, VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings)
349 {
350    VkDescriptorSetLayout dsl;
351    VkDescriptorSetLayoutCreateInfo dcslci = {0};
352    dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
353    dcslci.pNext = NULL;
354    VkDescriptorSetLayoutBindingFlagsCreateInfo fci = {0};
355    VkDescriptorBindingFlags flags[ZINK_MAX_DESCRIPTORS_PER_TYPE];
356    if (zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY) {
357       dcslci.pNext = &fci;
358       if (t == ZINK_DESCRIPTOR_TYPES)
359          dcslci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
360       fci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
361       fci.bindingCount = num_bindings;
362       fci.pBindingFlags = flags;
363       for (unsigned i = 0; i < num_bindings; i++) {
364          flags[i] = 0;
365       }
366    }
367    dcslci.bindingCount = num_bindings;
368    dcslci.pBindings = bindings;
369    VkDescriptorSetLayoutSupport supp;
370    supp.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT;
371    supp.pNext = NULL;
372    supp.supported = VK_FALSE;
373    if (VKSCR(GetDescriptorSetLayoutSupport)) {
374       VKSCR(GetDescriptorSetLayoutSupport)(screen->dev, &dcslci, &supp);
375       if (supp.supported == VK_FALSE) {
376          debug_printf("vkGetDescriptorSetLayoutSupport claims layout is unsupported\n");
377          return VK_NULL_HANDLE;
378       }
379    }
380    VkResult result = VKSCR(CreateDescriptorSetLayout)(screen->dev, &dcslci, 0, &dsl);
381    if (result != VK_SUCCESS)
382       mesa_loge("ZINK: vkCreateDescriptorSetLayout failed (%s)", vk_Result_to_str(result));
383    return dsl;
384 }
385 
386 static uint32_t
387 hash_descriptor_layout(const void *key)
388 {
389    uint32_t hash = 0;
390    const struct zink_descriptor_layout_key *k = key;
391    hash = XXH32(&k->num_bindings, sizeof(unsigned), hash);
392    /* only hash first 3 members: no holes and the rest are always constant */
393    for (unsigned i = 0; i < k->num_bindings; i++)
394       hash = XXH32(&k->bindings[i], offsetof(VkDescriptorSetLayoutBinding, stageFlags), hash);
395 
396    return hash;
397 }
398 
399 static bool
400 equals_descriptor_layout(const void *a, const void *b)
401 {
402    const struct zink_descriptor_layout_key *a_k = a;
403    const struct zink_descriptor_layout_key *b_k = b;
404    return a_k->num_bindings == b_k->num_bindings &&
405           !memcmp(a_k->bindings, b_k->bindings, a_k->num_bindings * sizeof(VkDescriptorSetLayoutBinding));
406 }
407 
408 static struct zink_descriptor_layout *
409 create_layout(struct zink_context *ctx, enum zink_descriptor_type type,
410               VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings,
411               struct zink_descriptor_layout_key **layout_key)
412 {
413    struct zink_screen *screen = zink_screen(ctx->base.screen);
414    VkDescriptorSetLayout dsl = descriptor_layout_create(screen, type, bindings, num_bindings);
415    if (!dsl)
416       return NULL;
417 
418    struct zink_descriptor_layout_key *k = ralloc(ctx, struct zink_descriptor_layout_key);
419    k->num_bindings = num_bindings;
420    if (num_bindings) {
421       size_t bindings_size = num_bindings * sizeof(VkDescriptorSetLayoutBinding);
422       k->bindings = ralloc_size(k, bindings_size);
423       if (!k->bindings) {
424          ralloc_free(k);
425          VKSCR(DestroyDescriptorSetLayout)(screen->dev, dsl, NULL);
426          return NULL;
427       }
428       memcpy(k->bindings, bindings, bindings_size);
429    }
430 
431    struct zink_descriptor_layout *layout = rzalloc(ctx, struct zink_descriptor_layout);
432    layout->layout = dsl;
433    *layout_key = k;
434    return layout;
435 }
436 
437 struct zink_descriptor_layout *
438 zink_descriptor_util_layout_get(struct zink_context *ctx, enum zink_descriptor_type type,
439                       VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings,
440                       struct zink_descriptor_layout_key **layout_key)
441 {
442    uint32_t hash = 0;
443    struct zink_descriptor_layout_key key = {
444       .num_bindings = num_bindings,
445       .bindings = bindings,
446    };
447 
448    if (type != ZINK_DESCRIPTOR_TYPES) {
449       hash = hash_descriptor_layout(&key);
450       struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->desc_set_layouts[type], hash, &key);
451       if (he) {
452          *layout_key = (void*)he->key;
453          return he->data;
454       }
455    }
456 
457    struct zink_descriptor_layout *layout = create_layout(ctx, type, bindings, num_bindings, layout_key);
458    if (layout && type != ZINK_DESCRIPTOR_TYPES) {
459       _mesa_hash_table_insert_pre_hashed(&ctx->desc_set_layouts[type], hash, *layout_key, layout);
460    }
461    return layout;
462 }
463 
464 
465 static uint32_t
466 hash_descriptor_pool_key(const void *key)
467 {
468    uint32_t hash = 0;
469    const struct zink_descriptor_pool_key *k = key;
470    hash = XXH32(&k->layout, sizeof(void*), hash);
471    for (unsigned i = 0; i < k->num_type_sizes; i++)
472       hash = XXH32(&k->sizes[i], sizeof(VkDescriptorPoolSize), hash);
473 
474    return hash;
475 }
476 
477 static bool
478 equals_descriptor_pool_key(const void *a, const void *b)
479 {
480    const struct zink_descriptor_pool_key *a_k = a;
481    const struct zink_descriptor_pool_key *b_k = b;
482    const unsigned a_num_type_sizes = a_k->num_type_sizes;
483    const unsigned b_num_type_sizes = b_k->num_type_sizes;
484    return a_k->layout == b_k->layout &&
485           a_num_type_sizes == b_num_type_sizes &&
486           !memcmp(a_k->sizes, b_k->sizes, b_num_type_sizes * sizeof(VkDescriptorPoolSize));
487 }
488 
489 struct zink_descriptor_pool_key *
490 zink_descriptor_util_pool_key_get(struct zink_context *ctx, enum zink_descriptor_type type,
491                                   struct zink_descriptor_layout_key *layout_key,
492                                   VkDescriptorPoolSize *sizes, unsigned num_type_sizes)
493 {
494    uint32_t hash = 0;
495    struct zink_descriptor_pool_key key;
496    key.num_type_sizes = num_type_sizes;
497    if (type != ZINK_DESCRIPTOR_TYPES) {
498       key.layout = layout_key;
499       memcpy(key.sizes, sizes, num_type_sizes * sizeof(VkDescriptorPoolSize));
500       hash = hash_descriptor_pool_key(&key);
501       struct set_entry *he = _mesa_set_search_pre_hashed(&ctx->desc_pool_keys[type], hash, &key);
502       if (he)
503          return (void*)he->key;
504    }
505 
506    struct zink_descriptor_pool_key *pool_key = rzalloc(ctx, struct zink_descriptor_pool_key);
507    pool_key->layout = layout_key;
508    pool_key->num_type_sizes = num_type_sizes;
509    assert(pool_key->num_type_sizes);
510    memcpy(pool_key->sizes, sizes, num_type_sizes * sizeof(VkDescriptorPoolSize));
511    if (type != ZINK_DESCRIPTOR_TYPES)
512       _mesa_set_add_pre_hashed(&ctx->desc_pool_keys[type], hash, pool_key);
513    return pool_key;
514 }
515 
516 static void
517 init_push_binding(VkDescriptorSetLayoutBinding *binding, unsigned i, VkDescriptorType type)
518 {
519    binding->binding = tgsi_processor_to_shader_stage(i);
520    binding->descriptorType = type;
521    binding->descriptorCount = 1;
522    binding->stageFlags = zink_shader_stage(i);
523    binding->pImmutableSamplers = NULL;
524 }
525 
526 static VkDescriptorType
527 get_push_types(struct zink_screen *screen, enum zink_descriptor_type *dsl_type)
528 {
529    *dsl_type = zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY &&
530                screen->info.have_KHR_push_descriptor ? ZINK_DESCRIPTOR_TYPES : ZINK_DESCRIPTOR_TYPE_UBO;
531    return zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ?
532           VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
533 }
534 
535 static struct zink_descriptor_layout *
536 create_gfx_layout(struct zink_context *ctx, struct zink_descriptor_layout_key **layout_key, bool fbfetch)
537 {
538    struct zink_screen *screen = zink_screen(ctx->base.screen);
539    VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES];
540    enum zink_descriptor_type dsl_type;
541    VkDescriptorType vktype = get_push_types(screen, &dsl_type);
542    for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++)
543       init_push_binding(&bindings[i], i, vktype);
544    if (fbfetch) {
545       bindings[ZINK_SHADER_COUNT].binding = ZINK_FBFETCH_BINDING;
546       bindings[ZINK_SHADER_COUNT].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
547       bindings[ZINK_SHADER_COUNT].descriptorCount = 1;
548       bindings[ZINK_SHADER_COUNT].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
549       bindings[ZINK_SHADER_COUNT].pImmutableSamplers = NULL;
550    }
551    return create_layout(ctx, dsl_type, bindings, fbfetch ? ARRAY_SIZE(bindings) : ARRAY_SIZE(bindings) - 1, layout_key);
552 }
553 
554 bool
555 zink_descriptor_util_push_layouts_get(struct zink_context *ctx, struct zink_descriptor_layout **dsls, struct zink_descriptor_layout_key **layout_keys)
556 {
557    struct zink_screen *screen = zink_screen(ctx->base.screen);
558    VkDescriptorSetLayoutBinding compute_binding;
559    enum zink_descriptor_type dsl_type;
560    VkDescriptorType vktype = get_push_types(screen, &dsl_type);
561    init_push_binding(&compute_binding, PIPE_SHADER_COMPUTE, vktype);
562    dsls[0] = create_gfx_layout(ctx, &layout_keys[0], false);
563    dsls[1] = create_layout(ctx, dsl_type, &compute_binding, 1, &layout_keys[1]);
564    return dsls[0] && dsls[1];
565 }
566 
567 VkImageLayout
568 zink_descriptor_util_image_layout_eval(const struct zink_context *ctx, const struct zink_resource *res, bool is_compute)
569 {
570    if (res->bindless[0] || res->bindless[1]) {
571       /* bindless needs most permissive layout */
572       if (res->image_bind_count[0] || res->image_bind_count[1])
573          return VK_IMAGE_LAYOUT_GENERAL;
574       return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
575    }
576    if (res->image_bind_count[is_compute])
577       return VK_IMAGE_LAYOUT_GENERAL;
578    if (res->aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
579       if (!is_compute && res->fb_binds &&
580           ctx->gfx_pipeline_state.render_pass && ctx->gfx_pipeline_state.render_pass->state.rts[ctx->fb_state.nr_cbufs].mixed_zs)
581          return VK_IMAGE_LAYOUT_GENERAL;
582       if (res->obj->vkusage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
583          return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
584    }
585    return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
586 }
587 
588 static struct zink_descriptor_pool *
589 descriptor_pool_get(struct zink_context *ctx, enum zink_descriptor_type type,
590                     const struct zink_descriptor_pool_key *pool_key)
591 {
592    uint32_t hash = 0;
593    if (type != ZINK_DESCRIPTOR_TYPES) {
594       hash = hash_descriptor_pool_key(pool_key);
595       struct hash_entry *he = _mesa_hash_table_search_pre_hashed(ctx->dd->descriptor_pools[type], hash, pool_key);
596       if (he) {
597          struct zink_descriptor_pool *pool = he->data;
598          pipe_reference(NULL, &pool->reference);
599          return pool;
600       }
601    }
602    struct zink_descriptor_pool *pool = descriptor_pool_create(zink_screen(ctx->base.screen), type, pool_key);
603    if (type != ZINK_DESCRIPTOR_TYPES)
604       _mesa_hash_table_insert_pre_hashed(ctx->dd->descriptor_pools[type], hash, pool_key, pool);
605    return pool;
606 }
607 
608 static bool
609 get_invalidated_desc_set(struct zink_descriptor_set *zds)
610 {
611    if (!zds->invalid)
612       return false;
613    return p_atomic_read(&zds->reference.count) == 1;
614 }
615 
616 bool
617 zink_descriptor_util_alloc_sets(struct zink_screen *screen, VkDescriptorSetLayout dsl, VkDescriptorPool pool, VkDescriptorSet *sets, unsigned num_sets)
618 {
619    VkDescriptorSetAllocateInfo dsai;
620    VkDescriptorSetLayout *layouts = alloca(sizeof(*layouts) * num_sets);
621    memset((void *)&dsai, 0, sizeof(dsai));
622    dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
623    dsai.pNext = NULL;
624    dsai.descriptorPool = pool;
625    dsai.descriptorSetCount = num_sets;
626    for (unsigned i = 0; i < num_sets; i ++)
627       layouts[i] = dsl;
628    dsai.pSetLayouts = layouts;
629 
630    VkResult result = VKSCR(AllocateDescriptorSets)(screen->dev, &dsai, sets);
631    if (result != VK_SUCCESS) {
632       mesa_loge("ZINK: %" PRIu64 " failed to allocate descriptor set :/ (%s)", (uint64_t)dsl, vk_Result_to_str(result));
633       return false;
634    }
635    return true;
636 }
637 
638 static struct zink_descriptor_set *
639 allocate_desc_set(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used, bool is_compute)
640 {
641    struct zink_screen *screen = zink_screen(ctx->base.screen);
642    bool push_set = type == ZINK_DESCRIPTOR_TYPES;
643    struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pdd_cached(pg)->pool[type];
644 #define DESC_BUCKET_FACTOR 10
645    unsigned bucket_size = pool->key->layout->num_bindings ? DESC_BUCKET_FACTOR : 1;
646    if (pool->key->layout->num_bindings) {
647       for (unsigned desc_factor = DESC_BUCKET_FACTOR; desc_factor < descs_used; desc_factor *= DESC_BUCKET_FACTOR)
648          bucket_size = desc_factor;
649    }
650    /* never grow more than this many at a time */
651    bucket_size = MIN2(bucket_size, ZINK_DEFAULT_MAX_DESCS);
652    VkDescriptorSet *desc_set = alloca(sizeof(*desc_set) * bucket_size);
653    if (!zink_descriptor_util_alloc_sets(screen, push_set ? ctx->dd->push_dsl[is_compute]->layout : pg->dsl[type + 1], pool->descpool, desc_set, bucket_size))
654       return VK_NULL_HANDLE;
655 
656    struct zink_descriptor_set *alloc = ralloc_array(pool, struct zink_descriptor_set, bucket_size);
657    assert(alloc);
658    unsigned num_resources = pool->num_resources;
659    struct zink_resource_object **res_objs = NULL;
660    void **samplers = NULL;
661    struct zink_descriptor_surface *surfaces = NULL;
662    switch (type) {
663    case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
664       samplers = rzalloc_array(pool, void*, num_resources * bucket_size);
665       assert(samplers);
666       FALLTHROUGH;
667    case ZINK_DESCRIPTOR_TYPE_IMAGE:
668       surfaces = rzalloc_array(pool, struct zink_descriptor_surface, num_resources * bucket_size);
669       assert(surfaces);
670       break;
671    default:
672       res_objs = rzalloc_array(pool, struct zink_resource_object*, num_resources * bucket_size);
673       assert(res_objs);
674       break;
675    }
676    for (unsigned i = 0; i < bucket_size; i ++) {
677       struct zink_descriptor_set *zds = &alloc[i];
678       pipe_reference_init(&zds->reference, 1);
679       zds->pool = pool;
680       zds->hash = 0;
681       zds->batch_uses = NULL;
682       zds->invalid = true;
683       zds->punted = zds->recycled = false;
684 #ifndef NDEBUG
685       zds->num_resources = num_resources;
686 #endif
687       switch (type) {
688       case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
689          zds->sampler_states = (struct zink_sampler_state**)&samplers[i * num_resources];
690          FALLTHROUGH;
691       case ZINK_DESCRIPTOR_TYPE_IMAGE:
692          zds->surfaces = &surfaces[i * num_resources];
693          break;
694       default:
695          zds->res_objs = (struct zink_resource_object**)&res_objs[i * num_resources];
696          break;
697       }
698       zds->desc_set = desc_set[i];
699       if (i > 0)
700          util_dynarray_append(&pool->alloc_desc_sets, struct zink_descriptor_set *, zds);
701    }
702    pool->num_sets_allocated += bucket_size;
703    return alloc;
704 }
705 
706 static void
707 populate_zds_key(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute,
708                  struct zink_descriptor_state_key *key, uint32_t push_usage)
709 {
710    if (is_compute) {
711       for (unsigned i = 1; i < ZINK_SHADER_COUNT; i++)
712          key->exists[i] = false;
713       key->exists[0] = true;
714       if (type == ZINK_DESCRIPTOR_TYPES)
715          key->state[0] = ctx->dd->push_state[is_compute];
716       else {
717          assert(ctx->dd->descriptor_states[is_compute].valid[type]);
718          key->state[0] = ctx->dd->descriptor_states[is_compute].state[type];
719       }
720    } else if (type == ZINK_DESCRIPTOR_TYPES) {
721       /* gfx only */
722       for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
723          if (push_usage & BITFIELD_BIT(i)) {
724             key->exists[i] = true;
725             key->state[i] = ctx->dd->gfx_push_state[i];
726          } else
727             key->exists[i] = false;
728       }
729    } else {
730       for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
731          key->exists[i] = ctx->dd->gfx_descriptor_states[i].valid[type];
732          key->state[i] = ctx->dd->gfx_descriptor_states[i].state[type];
733       }
734    }
735 }
736 
737 static void
738 populate_zds_key_compact(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute,
739                          struct zink_descriptor_state_key *key, uint32_t push_usage)
740 {
741    if (is_compute) {
742       for (unsigned i = 1; i < ZINK_SHADER_COUNT; i++)
743          key->exists[i] = false;
744       key->exists[0] = true;
745       if (type == ZINK_DESCRIPTOR_TYPES)
746          key->state[0] = ctx->dd->push_state[is_compute];
747       else {
748          assert(ctx->dd->compact_descriptor_states[is_compute].valid[type]);
749          key->state[0] = ctx->dd->compact_descriptor_states[is_compute].state[type];
750       }
751    } else if (type == ZINK_DESCRIPTOR_TYPES) {
752       /* gfx only */
753       for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
754          if (push_usage & BITFIELD_BIT(i)) {
755             key->exists[i] = true;
756             key->state[i] = ctx->dd->gfx_push_state[i];
757          } else
758             key->exists[i] = false;
759       }
760    } else {
761       for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
762          key->exists[i] = ctx->dd->compact_gfx_descriptor_states[i].valid[type];
763          key->state[i] = ctx->dd->compact_gfx_descriptor_states[i].state[type];
764       }
765    }
766 }
767 
768 static void
769 punt_invalid_set(struct zink_descriptor_set *zds, struct hash_entry *he)
770 {
771    /* this is no longer usable, so we punt it for now until it gets recycled */
772    assert(!zds->recycled);
773    if (!he)
774       he = _mesa_hash_table_search_pre_hashed(zds->pool->desc_sets, zds->hash, &zds->key);
775    _mesa_hash_table_remove(zds->pool->desc_sets, he);
776    zds->punted = true;
777 }
778 
779 static struct zink_descriptor_set *
780 zink_descriptor_set_get(struct zink_context *ctx,
781                                enum zink_descriptor_type type,
782                                bool is_compute,
783                                bool *cache_hit)
784 {
785    *cache_hit = false;
786    struct zink_screen *screen = zink_screen(ctx->base.screen);
787    struct zink_descriptor_set *zds;
788    struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
789    struct zink_batch *batch = &ctx->batch;
790    bool push_set = type == ZINK_DESCRIPTOR_TYPES;
791    struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pdd_cached(pg)->pool[type];
792    unsigned descs_used = 1;
793    assert(type <= ZINK_DESCRIPTOR_TYPES);
794 
795    assert(pool->key->layout->num_bindings);
796    assert(!screen->compact_descriptors || (type != ZINK_DESCRIPTOR_TYPE_SSBO && type != ZINK_DESCRIPTOR_TYPE_IMAGE));
797    uint32_t hash = push_set ? ctx->dd->push_state[is_compute] :
798                               screen->compact_descriptors ?
799                               ctx->dd->compact_descriptor_states[is_compute].state[type] :
800                               ctx->dd->descriptor_states[is_compute].state[type];
801 
802    struct zink_descriptor_set *last_set = push_set ? ctx->dd->last_set[is_compute] : pdd_cached(pg)->last_set[type];
803    /* if the current state hasn't changed since the last time it was used,
804     * it's impossible for this set to not be valid, which means that an
805     * early return here can be done safely and with no locking
806     */
807    if (last_set && ((push_set && !ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES]) ||
808                     (!push_set && (screen->compact_descriptors ?
809                                    !ctx->dd->changed[is_compute][type] && !ctx->dd->changed[is_compute][type+ZINK_DESCRIPTOR_COMPACT] :
810                                    !ctx->dd->changed[is_compute][type])))) {
811       *cache_hit = true;
812       return last_set;
813    }
814 
815    struct zink_descriptor_state_key key;
816    if (screen->compact_descriptors)
817       populate_zds_key_compact(ctx, type, is_compute, &key, pg->dd->push_usage);
818    else
819       populate_zds_key(ctx, type, is_compute, &key, pg->dd->push_usage);
820 
821    simple_mtx_lock(&pool->mtx);
822    if (last_set && last_set->hash == hash && desc_state_equal(&last_set->key, &key)) {
823       bool was_recycled = false;
824       zds = last_set;
825       *cache_hit = !zds->invalid;
826       if (zds->recycled) {
827          struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
828          if (he) {
829             was_recycled = true;
830             _mesa_hash_table_remove(pool->free_desc_sets, he);
831          }
832          zds->recycled = false;
833       }
834       if (zds->invalid) {
835           if (zink_batch_usage_exists(zds->batch_uses))
836              punt_invalid_set(zds, NULL);
837           else {
838              if (was_recycled) {
839                 descriptor_set_invalidate(zds);
840                 goto out;
841              }
842              /* this set is guaranteed to be in pool->alloc_desc_sets */
843              goto skip_hash_tables;
844           }
845           zds = NULL;
846       }
847       if (zds)
848          goto out;
849    }
850 
851    struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, hash, &key);
852    bool recycled = false, punted = false;
853    if (he) {
854        zds = (void*)he->data;
855        if (zds->invalid && zink_batch_usage_exists(zds->batch_uses)) {
856           punt_invalid_set(zds, he);
857           zds = NULL;
858           punted = true;
859        }
860    }
861    if (!he) {
862       he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
863       recycled = true;
864    }
865    if (he && !punted) {
866       zds = (void*)he->data;
867       *cache_hit = !zds->invalid;
868       if (recycled) {
869          if (zds->invalid)
870             descriptor_set_invalidate(zds);
871          /* need to migrate this entry back to the in-use hash */
872          _mesa_hash_table_remove(pool->free_desc_sets, he);
873          goto out;
874       }
875       goto quick_out;
876    }
877 skip_hash_tables:
878    if (util_dynarray_num_elements(&pool->alloc_desc_sets, struct zink_descriptor_set *)) {
879       /* grab one off the allocated array */
880       zds = util_dynarray_pop(&pool->alloc_desc_sets, struct zink_descriptor_set *);
881       goto out;
882    }
883 
884    if (_mesa_hash_table_num_entries(pool->free_desc_sets)) {
885       /* try for an invalidated set first */
886       unsigned count = 0;
887       hash_table_foreach(pool->free_desc_sets, he) {
888          struct zink_descriptor_set *tmp = he->data;
889          if ((count++ >= 100 && tmp->reference.count == 1) || get_invalidated_desc_set(he->data)) {
890             zds = tmp;
891             assert(p_atomic_read(&zds->reference.count) == 1);
892             descriptor_set_invalidate(zds);
893             _mesa_hash_table_remove(pool->free_desc_sets, he);
894             goto out;
895          }
896       }
897    }
898 
899    assert(pool->num_sets_allocated < ZINK_DEFAULT_MAX_DESCS);
900 
901    zds = allocate_desc_set(ctx, pg, type, descs_used, is_compute);
902 out:
903    if (unlikely(pool->num_sets_allocated >= ZINK_DEFAULT_DESC_CLAMP &&
904                 _mesa_hash_table_num_entries(pool->free_desc_sets) < ZINK_DEFAULT_MAX_DESCS - ZINK_DEFAULT_DESC_CLAMP))
905       ctx->oom_flush = ctx->oom_stall = true;
906    zds->hash = hash;
907    if (screen->compact_descriptors)
908       populate_zds_key_compact(ctx, type, is_compute, &zds->key, pg->dd->push_usage);
909    else
910       populate_zds_key(ctx, type, is_compute, &zds->key, pg->dd->push_usage);
911    zds->recycled = false;
912    _mesa_hash_table_insert_pre_hashed(pool->desc_sets, hash, &zds->key, zds);
913 quick_out:
914    if (!push_set) {
915       if (screen->compact_descriptors) {
916          if (zink_desc_type_from_vktype(pool->key->sizes[0].type) == type)
917             zds->compacted |= BITFIELD_BIT(type);
918          for (unsigned i = 0; i < pool->key->num_type_sizes; i++) {
919             if (zink_desc_type_from_vktype(pool->key->sizes[i].type) == type + ZINK_DESCRIPTOR_COMPACT) {
920                zds->compacted |= BITFIELD_BIT(type + ZINK_DESCRIPTOR_COMPACT);
921                break;
922             }
923          }
924       } else
925          zds->compacted |= BITFIELD_BIT(type);
926    }
927    zds->punted = zds->invalid = false;
928    batch_add_desc_set(batch, zds);
929    if (push_set)
930       ctx->dd->last_set[is_compute] = zds;
931    else
932       pdd_cached(pg)->last_set[type] = zds;
933    simple_mtx_unlock(&pool->mtx);
934 
935    return zds;
936 }
937 
938 void
939 zink_descriptor_set_recycle(struct zink_descriptor_set *zds)
940 {
941    struct zink_descriptor_pool *pool = zds->pool;
942    /* if desc set is still in use by a batch, don't recache */
943    uint32_t refcount = p_atomic_read(&zds->reference.count);
944    if (refcount != 1)
945       return;
946    /* this is a null set */
947    if (!pool->key->layout->num_bindings)
948       return;
949    simple_mtx_lock(&pool->mtx);
950    if (zds->punted)
951       zds->invalid = true;
952    else {
953       /* if we've previously punted this set, then it won't have a hash or be in either of the tables */
954       struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, zds->hash, &zds->key);
955       if (!he) {
956          /* desc sets can be used multiple times in the same batch */
957          simple_mtx_unlock(&pool->mtx);
958          return;
959       }
960       _mesa_hash_table_remove(pool->desc_sets, he);
961    }
962 
963    if (zds->invalid) {
964       descriptor_set_invalidate(zds);
965       util_dynarray_append(&pool->alloc_desc_sets, struct zink_descriptor_set *, zds);
966    } else {
967       zds->recycled = true;
968       _mesa_hash_table_insert_pre_hashed(pool->free_desc_sets, zds->hash, &zds->key, zds);
969    }
970    simple_mtx_unlock(&pool->mtx);
971 }
972 
973 
974 static void
975 desc_set_ref_add(struct zink_descriptor_set *zds, struct zink_descriptor_refs *refs, void **ref_ptr, void *ptr)
976 {
977    struct zink_descriptor_reference ref = {ref_ptr, &zds->invalid};
978    *ref_ptr = ptr;
979    if (ptr)
980       util_dynarray_append(&refs->refs, struct zink_descriptor_reference, ref);
981 }
982 
983 static void
984 zink_descriptor_surface_desc_set_add(struct zink_descriptor_surface *dsurf, struct zink_descriptor_set *zds, unsigned idx)
985 {
986    assert(idx < zds->num_resources);
987    zds->surfaces[idx].is_buffer = dsurf->is_buffer;
988    if (dsurf->is_buffer)
989       desc_set_ref_add(zds, &dsurf->bufferview->desc_set_refs, (void**)&zds->surfaces[idx].bufferview, dsurf->bufferview);
990    else
991       desc_set_ref_add(zds, &dsurf->surface->desc_set_refs, (void**)&zds->surfaces[idx].surface, dsurf->surface);
992 }
993 
994 static void
995 zink_image_view_desc_set_add(struct zink_image_view *image_view, struct zink_descriptor_set *zds, unsigned idx, bool is_buffer)
996 {
997    assert(idx < zds->num_resources);
998    if (is_buffer)
999       desc_set_ref_add(zds, &image_view->buffer_view->desc_set_refs, (void**)&zds->surfaces[idx].bufferview, image_view->buffer_view);
1000    else
1001       desc_set_ref_add(zds, &image_view->surface->desc_set_refs, (void**)&zds->surfaces[idx].surface, image_view->surface);
1002 }
1003 
1004 static void
1005 zink_sampler_state_desc_set_add(struct zink_sampler_state *sampler_state, struct zink_descriptor_set *zds, unsigned idx)
1006 {
1007    assert(idx < zds->num_resources);
1008    if (sampler_state)
1009       desc_set_ref_add(zds, &sampler_state->desc_set_refs, (void**)&zds->sampler_states[idx], sampler_state);
1010    else
1011       zds->sampler_states[idx] = NULL;
1012 }
1013 
1014 static void
1015 zink_resource_desc_set_add(struct zink_resource *res, struct zink_descriptor_set *zds, unsigned idx)
1016 {
1017    assert(idx < zds->num_resources);
1018    desc_set_ref_add(zds, res ? &res->obj->desc_set_refs : NULL, (void**)&zds->res_objs[idx], res ? res->obj : NULL);
1019 }
1020 
1021 void
1022 zink_descriptor_set_refs_clear(struct zink_descriptor_refs *refs, void *ptr)
1023 {
1024    util_dynarray_foreach(&refs->refs, struct zink_descriptor_reference, ref) {
1025       if (*ref->ref == ptr) {
1026          *ref->invalid = true;
1027          *ref->ref = NULL;
1028       }
1029    }
1030    util_dynarray_fini(&refs->refs);
1031 }
1032 
1033 static inline void
1034 zink_descriptor_pool_reference(struct zink_context *ctx,
1035                                struct zink_descriptor_pool **dst,
1036                                struct zink_descriptor_pool *src)
1037 {
1038    struct zink_descriptor_pool *old_dst = dst ? *dst : NULL;
1039 
1040    if (pipe_reference_described(old_dst ? &old_dst->reference : NULL, &src->reference,
1041                                 (debug_reference_descriptor)debug_describe_zink_descriptor_pool))
1042       descriptor_pool_delete(ctx, old_dst);
1043    if (dst) *dst = src;
1044 }
1045 
1046 static void
1047 create_descriptor_ref_template(struct zink_context *ctx, struct zink_program *pg)
1048 {
1049    struct zink_shader **stages;
1050    if (pg->is_compute)
1051       stages = &((struct zink_compute_program*)pg)->shader;
1052    else
1053       stages = ((struct zink_gfx_program*)pg)->shaders;
1054    unsigned num_shaders = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
1055 
1056    for (unsigned type = 0; type < ZINK_DESCRIPTOR_TYPES; type++) {
1057       for (int i = 0; i < num_shaders; i++) {
1058          struct zink_shader *shader = stages[i];
1059          if (!shader)
1060             continue;
1061 
1062          for (int j = 0; j < shader->num_bindings[type]; j++) {
1063              int index = shader->bindings[type][j].index;
1064              if (type == ZINK_DESCRIPTOR_TYPE_UBO && !index)
1065                 continue;
1066              pdd_cached(pg)->num_refs[type] += shader->bindings[type][j].size;
1067          }
1068       }
1069 
1070       if (!pdd_cached(pg)->num_refs[type])
1071          continue;
1072 
1073       pdd_cached(pg)->refs[type] = ralloc_array(pg->dd, union zink_program_descriptor_refs, pdd_cached(pg)->num_refs[type]);
1074       if (!pdd_cached(pg)->refs[type])
1075          return;
1076 
1077       unsigned ref_idx = 0;
1078       for (int i = 0; i < num_shaders; i++) {
1079          struct zink_shader *shader = stages[i];
1080          if (!shader)
1081             continue;
1082 
1083          enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
1084          for (int j = 0; j < shader->num_bindings[type]; j++) {
1085             int index = shader->bindings[type][j].index;
1086             for (unsigned k = 0; k < shader->bindings[type][j].size; k++) {
1087                switch (type) {
1088                case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1089                   pdd_cached(pg)->refs[type][ref_idx].sampler.sampler_state = (struct zink_sampler_state**)&ctx->sampler_states[stage][index + k];
1090                   pdd_cached(pg)->refs[type][ref_idx].sampler.dsurf = &ctx->di.sampler_surfaces[stage][index + k];
1091                   break;
1092                case ZINK_DESCRIPTOR_TYPE_IMAGE:
1093                   pdd_cached(pg)->refs[type][ref_idx].dsurf = &ctx->di.image_surfaces[stage][index + k];
1094                   break;
1095                case ZINK_DESCRIPTOR_TYPE_UBO:
1096                   if (!index)
1097                      continue;
1098                   FALLTHROUGH;
1099                default:
1100                   pdd_cached(pg)->refs[type][ref_idx].res = &ctx->di.descriptor_res[type][stage][index + k];
1101                   break;
1102                }
1103                assert(ref_idx < pdd_cached(pg)->num_refs[type]);
1104                ref_idx++;
1105             }
1106          }
1107       }
1108    }
1109 }
1110 
1111 bool
1112 zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
1113 {
1114    struct zink_screen *screen = zink_screen(ctx->base.screen);
1115 
1116    pg->dd = (void*)rzalloc(pg, struct zink_program_descriptor_data_cached);
1117    if (!pg->dd)
1118       return false;
1119 
1120    if (!zink_descriptor_program_init_lazy(ctx, pg))
1121       return false;
1122 
1123    /* no descriptors */
1124    if (!pg->dd)
1125       return true;
1126 
1127    bool has_pools = false;
1128    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1129       if (!pg->dd->pool_key[i])
1130          continue;
1131 
1132       const struct zink_descriptor_pool_key *pool_key = pg->dd->pool_key[i];
1133       struct zink_descriptor_pool *pool = descriptor_pool_get(ctx, i, pool_key);
1134       if (!pool)
1135          return false;
1136       pdd_cached(pg)->pool[i] = pool;
1137       has_pools = true;
1138    }
1139    if (has_pools && screen->info.have_KHR_descriptor_update_template &&
1140        zink_descriptor_mode != ZINK_DESCRIPTOR_MODE_NOTEMPLATES)
1141       create_descriptor_ref_template(ctx, pg);
1142 
1143    return true;
1144 }
1145 
1146 void
1147 zink_descriptor_program_deinit(struct zink_context *ctx, struct zink_program *pg)
1148 {
1149    if (!pg->dd)
1150       return;
1151    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
1152       zink_descriptor_pool_reference(ctx, &pdd_cached(pg)->pool[i], NULL);
1153 
1154    zink_descriptor_program_deinit_lazy(ctx, pg);
1155 }
1156 
1157 static void
1158 zink_descriptor_pool_deinit(struct zink_context *ctx)
1159 {
1160    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1161       /* do not free: programs own these pools */
1162       _mesa_hash_table_destroy(ctx->dd->descriptor_pools[i], NULL);
1163    }
1164    descriptor_pool_free(zink_screen(ctx->base.screen), ctx->dd->push_pool[0]);
1165    descriptor_pool_free(zink_screen(ctx->base.screen), ctx->dd->push_pool[1]);
1166 }
1167 
1168 static bool
1169 zink_descriptor_pool_init(struct zink_context *ctx)
1170 {
1171    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1172       ctx->dd->descriptor_pools[i] = _mesa_hash_table_create(ctx, hash_descriptor_pool_key, equals_descriptor_pool_key);
1173       if (!ctx->dd->descriptor_pools[i])
1174          return false;
1175    }
1176    VkDescriptorPoolSize sizes[2];
1177    sizes[0].type = zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1178    sizes[0].descriptorCount = ZINK_SHADER_COUNT * ZINK_DEFAULT_MAX_DESCS;
1179    sizes[1].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
1180    sizes[1].descriptorCount = ZINK_DEFAULT_MAX_DESCS;
1181    /* these are freed by ralloc */
1182    struct zink_descriptor_pool_key *pool_key;
1183    pool_key = zink_descriptor_util_pool_key_get(ctx, ZINK_DESCRIPTOR_TYPES, ctx->dd->push_layout_keys[0], sizes, ctx->dd->has_fbfetch ? 2 : 1);
1184    ctx->dd->push_pool[0] = descriptor_pool_get(ctx, 0, pool_key);
1185    sizes[0].descriptorCount = ZINK_DEFAULT_MAX_DESCS;
1186    pool_key = zink_descriptor_util_pool_key_get(ctx, ZINK_DESCRIPTOR_TYPES, ctx->dd->push_layout_keys[1], sizes, 1);
1187    ctx->dd->push_pool[1] = descriptor_pool_get(ctx, 0, pool_key);
1188    return ctx->dd->push_pool[0] && ctx->dd->push_pool[1];
1189 }
1190 
1191 
1192 static void
1193 desc_set_res_add(struct zink_descriptor_set *zds, struct zink_resource *res, unsigned int i, bool cache_hit)
1194 {
1195    /* if we got a cache hit, we have to verify that the cached set is still valid;
1196     * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1197     * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1198     * whenever a resource is destroyed
1199     */
1200    assert(!cache_hit || zds->res_objs[i] == (res ? res->obj : NULL));
1201    if (!cache_hit)
1202       zink_resource_desc_set_add(res, zds, i);
1203 }
1204 
1205 static void
1206 desc_set_sampler_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_descriptor_surface *dsurf,
1207                      struct zink_sampler_state *state, unsigned int i, bool cache_hit)
1208 {
1209    /* if we got a cache hit, we have to verify that the cached set is still valid;
1210     * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1211     * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1212     * whenever a resource is destroyed
1213     */
1214 #ifndef NDEBUG
1215    uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1216    uint32_t new_hash = get_descriptor_surface_hash(ctx, dsurf);
1217 #endif
1218    assert(!cache_hit || cur_hash == new_hash);
1219    assert(!cache_hit || zds->sampler_states[i] == state);
1220    if (!cache_hit) {
1221       zink_descriptor_surface_desc_set_add(dsurf, zds, i);
1222       zink_sampler_state_desc_set_add(state, zds, i);
1223    }
1224 }
1225 
1226 static void
1227 desc_set_image_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_descriptor_surface *dsurf,
1228                    unsigned int i, bool cache_hit)
1229 {
1230    /* if we got a cache hit, we have to verify that the cached set is still valid;
1231     * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1232     * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1233     * whenever a resource is destroyed
1234     */
1235 #ifndef NDEBUG
1236    uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1237    uint32_t new_hash = get_descriptor_surface_hash(ctx, dsurf);
1238 #endif
1239    assert(!cache_hit || cur_hash == new_hash);
1240    if (!cache_hit)
1241       zink_descriptor_surface_desc_set_add(dsurf, zds, i);
1242 }
1243 
1244 static void
1245 desc_set_descriptor_surface_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_descriptor_surface *dsurf,
1246                    unsigned int i, bool cache_hit)
1247 {
1248    /* if we got a cache hit, we have to verify that the cached set is still valid;
1249     * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1250     * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1251     * whenever a resource is destroyed
1252     */
1253 #ifndef NDEBUG
1254    uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1255    uint32_t new_hash = get_descriptor_surface_hash(ctx, dsurf);
1256 #endif
1257    assert(!cache_hit || cur_hash == new_hash);
1258    if (!cache_hit)
1259       zink_descriptor_surface_desc_set_add(dsurf, zds, i);
1260 }
1261 
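/* fill in the VkWriteDescriptorSet fields that can be derived from the shader's binding info
 * (or from the push-set conventions when no shader is passed); the caller still sets
 * pBufferInfo/pImageInfo/pTexelBufferView, and the incremented write count is returned so
 * callers can chain this as a running counter
 */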
1262 static unsigned
1263 init_write_descriptor(struct zink_shader *shader, VkDescriptorSet desc_set, enum zink_descriptor_type type, int idx, VkWriteDescriptorSet *wd, unsigned num_wds)
1264 {
1265     wd->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1266     wd->pNext = NULL;
1267     wd->dstBinding = shader ? shader->bindings[type][idx].binding : idx;
1268     wd->dstArrayElement = 0;
1269     wd->descriptorCount = shader ? shader->bindings[type][idx].size : 1;
1270     wd->descriptorType = shader ? shader->bindings[type][idx].type :
1271                                   idx == ZINK_FBFETCH_BINDING ? VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1272     wd->dstSet = desc_set;
1273     return num_wds + 1;
1274 }
1275 
1276 static unsigned
1277 update_push_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
1278                             VkDescriptorSet desc_set,
1279                             bool is_compute, bool cache_hit, uint32_t *dynamic_offsets)
1280 {
1281    struct zink_screen *screen = zink_screen(ctx->base.screen);
1282    VkWriteDescriptorSet wds[ZINK_SHADER_COUNT + 1];
1283    VkDescriptorBufferInfo buffer_infos[ZINK_SHADER_COUNT];
1284    struct zink_shader **stages;
1285    bool fbfetch = false;
1286 
1287    unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
1288    struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
1289    if (is_compute)
1290       stages = &ctx->curr_compute->shader;
1291    else
1292       stages = &ctx->gfx_stages[0];
1293 
1294    for (int i = 0; i < num_stages; i++) {
1295       struct zink_shader *shader = stages[i];
1296       enum pipe_shader_type pstage = shader ? pipe_shader_type_from_mesa(shader->nir->info.stage) : i;
1297       VkDescriptorBufferInfo *info = &ctx->di.ubos[pstage][0];
1298       unsigned dynamic_idx = is_compute ? 0 : tgsi_processor_to_shader_stage(pstage);
1299 
1300       /* Values are taken from pDynamicOffsets in an order such that all entries for set N come before set N+1;
1301        * within a set, entries are ordered by the binding numbers in the descriptor set layouts
1302        * - vkCmdBindDescriptorSets spec
1303        *
1304        * because of this, we have to populate the dynamic offsets by their shader stage to ensure they
1305        * match what the driver expects
1306        */
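      /* e.g. for gfx, each stage contributes one dynamic UBO offset indexed by its gl_shader_stage
       * value (VS = 0 ... FS = 4), matching the push set's binding order; stages without push usage
       * contribute an offset of 0
       */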
1307       const bool used = (pg->dd->push_usage & BITFIELD_BIT(pstage)) == BITFIELD_BIT(pstage);
1308       dynamic_offsets[dynamic_idx] = used ? info->offset : 0;
1309       if (!cache_hit) {
1310          init_write_descriptor(NULL, desc_set, ZINK_DESCRIPTOR_TYPE_UBO, tgsi_processor_to_shader_stage(pstage), &wds[i], 0);
1311          if (used) {
1312             if (zds)
1313                desc_set_res_add(zds, ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][pstage][0], i, cache_hit);
1314             buffer_infos[i].buffer = info->buffer;
1315             buffer_infos[i].range = info->range;
1316          } else {
1317             if (zds)
1318                desc_set_res_add(zds, NULL, i, cache_hit);
1319             if (unlikely(!screen->info.rb2_feats.nullDescriptor))
1320                buffer_infos[i].buffer = zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
1321             else
1322                buffer_infos[i].buffer = VK_NULL_HANDLE;
1323             buffer_infos[i].range = VK_WHOLE_SIZE;
1324          }
1325          /* these are dynamic UBO descriptors, so we have to always set 0 as the descriptor offset */
1326          buffer_infos[i].offset = 0;
1327          wds[i].pBufferInfo = &buffer_infos[i];
1328       }
1329    }
1330    if (unlikely(!cache_hit && !is_compute && ctx->dd->has_fbfetch)) {
1331       init_write_descriptor(NULL, desc_set, 0, MESA_SHADER_STAGES, &wds[ZINK_SHADER_COUNT], 0);
1332       wds[ZINK_SHADER_COUNT].pImageInfo = &ctx->di.fbfetch;
1333       fbfetch = true;
1334    }
1335 
1336    if (!cache_hit)
1337       VKSCR(UpdateDescriptorSets)(screen->dev, num_stages + !!fbfetch, wds, 0, NULL);
1338    return num_stages;
1339 }
1340 
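/* with compact descriptors, UBO+SSBO and SAMPLER_VIEW+IMAGE each share a single set/pool
 * (see the STATIC_ASSERTs below), so refs for both merged types have to be recorded here
 */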
1341 static void
1342 set_descriptor_set_refs(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_program *pg, bool cache_hit)
1343 {
1344    const bool compact_descriptors = zink_screen(ctx->base.screen)->compact_descriptors;
1345    STATIC_ASSERT(ZINK_DESCRIPTOR_TYPE_UBO + ZINK_DESCRIPTOR_COMPACT == ZINK_DESCRIPTOR_TYPE_SSBO);
1346    STATIC_ASSERT(ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW + ZINK_DESCRIPTOR_COMPACT == ZINK_DESCRIPTOR_TYPE_IMAGE);
1347    const enum zink_descriptor_type types[] = {zds->pool->type, zds->pool->type + ZINK_DESCRIPTOR_COMPACT};
1348    unsigned num_types = compact_descriptors ? 2 : 1;
1349    for (unsigned n = 0; n < num_types; n++) {
1350       const enum zink_descriptor_type type = types[n];
1351       for (unsigned i = 0; i < pdd_cached(pg)->num_refs[type]; i++) {
1352          switch (type) {
1353          case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1354             desc_set_sampler_add(ctx, zds, pdd_cached(pg)->refs[type][i].sampler.dsurf,
1355                                            *pdd_cached(pg)->refs[type][i].sampler.sampler_state, i, cache_hit);
1356             break;
1357          case ZINK_DESCRIPTOR_TYPE_IMAGE:
1358             desc_set_descriptor_surface_add(ctx, zds, pdd_cached(pg)->refs[type][i].dsurf, i, cache_hit);
1359             break;
1360          default:
1361             desc_set_res_add(zds, *pdd_cached(pg)->refs[type][i].res, i, cache_hit);
1362             break;
1363          }
1364       }
1365    }
1366 }
1367 
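/* write out the cached set's descriptors; when descriptor update templates are available this
 * defers to the lazy-path templated update and only records the refs needed for later cache
 * validation
 */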
1368 static void
1369 update_descriptors_internal(struct zink_context *ctx, enum zink_descriptor_type type, struct zink_descriptor_set *zds, struct zink_program *pg, bool cache_hit)
1370 {
1371    struct zink_screen *screen = zink_screen(ctx->base.screen);
1372    struct zink_shader **stages;
1373 
1374    unsigned num_stages = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
1375    if (pg->is_compute)
1376       stages = &ctx->curr_compute->shader;
1377    else
1378       stages = &ctx->gfx_stages[0];
1379 
1380    if (cache_hit || !zds)
1381       return;
1382 
1383    if (screen->info.have_KHR_descriptor_update_template &&
1384        zink_descriptor_mode != ZINK_DESCRIPTOR_MODE_NOTEMPLATES) {
1385       set_descriptor_set_refs(ctx, zds, pg, cache_hit);
1386       zink_descriptor_set_update_lazy(ctx, pg, type, zds->desc_set);
1387       return;
1388    }
1389 
1390    unsigned num_resources = 0;
1391    ASSERTED unsigned num_bindings = zds->pool->num_resources;
1392    VkWriteDescriptorSet wds[ZINK_MAX_DESCRIPTORS_PER_TYPE];
1393    unsigned num_wds = 0;
1394 
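   /* a compacted set may cover two descriptor types (e.g. UBO+SSBO); walk both candidates and
    * skip any type that isn't part of this set's compacted bitmask
    */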
1395    const enum zink_descriptor_type types[2] = {type, type + ZINK_DESCRIPTOR_COMPACT};
1396    for (unsigned n = 0; n < ARRAY_SIZE(types); n++) {
1397       if (!(zds->compacted & BITFIELD_BIT(types[n])))
1398          continue;
1399       type = types[n];
1400       for (int i = 0; i < num_stages; i++) {
1401          struct zink_shader *shader = stages[i];
1402          if (!shader)
1403             continue;
1404          enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
1405          for (int j = 0; j < shader->num_bindings[type]; j++) {
1406             int index = shader->bindings[type][j].index;
1407             switch (type) {
1408             case ZINK_DESCRIPTOR_TYPE_UBO:
1409                if (!index)
1410                   continue;
1411             FALLTHROUGH;
1412             case ZINK_DESCRIPTOR_TYPE_SSBO: {
1413                VkDescriptorBufferInfo *info;
1414                struct zink_resource *res = ctx->di.descriptor_res[type][stage][index];
1415                if (type == ZINK_DESCRIPTOR_TYPE_UBO)
1416                   info = &ctx->di.ubos[stage][index];
1417                else
1418                   info = &ctx->di.ssbos[stage][index];
1419                assert(num_resources < num_bindings);
1420                desc_set_res_add(zds, res, num_resources++, cache_hit);
1421                wds[num_wds].pBufferInfo = info;
1422             }
1423             break;
1424             case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1425             case ZINK_DESCRIPTOR_TYPE_IMAGE: {
1426                VkDescriptorImageInfo *image_info;
1427                VkBufferView *buffer_info;
1428                if (type == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
1429                   image_info = &ctx->di.textures[stage][index];
1430                   buffer_info = &ctx->di.tbos[stage][index];
1431                } else {
1432                   image_info = &ctx->di.images[stage][index];
1433                   buffer_info = &ctx->di.texel_images[stage][index];
1434                }
1435                bool is_buffer = zink_shader_descriptor_is_buffer(shader, type, j);
1436                for (unsigned k = 0; k < shader->bindings[type][j].size; k++) {
1437                   assert(num_resources < num_bindings);
1438                   if (type == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
1439                      struct zink_sampler_state *sampler = NULL;
1440                      if (!is_buffer && image_info->imageView)
1441                         sampler = ctx->sampler_states[stage][index + k];
1442 
1443                      desc_set_sampler_add(ctx, zds, &ctx->di.sampler_surfaces[stage][index + k], sampler, num_resources++, cache_hit);
1444                   } else {
1445                      desc_set_image_add(ctx, zds, &ctx->di.image_surfaces[stage][index + k], num_resources++, cache_hit);
1446                   }
1447                }
1448                if (is_buffer)
1449                   wds[num_wds].pTexelBufferView = buffer_info;
1450                else
1451                   wds[num_wds].pImageInfo = image_info;
1452             }
1453             break;
1454             default:
1455                unreachable("unknown descriptor type");
1456             }
1457             num_wds = init_write_descriptor(shader, zds->desc_set, type, j, &wds[num_wds], num_wds);
1458          }
1459       }
1460    }
1461    if (num_wds)
1462       VKSCR(UpdateDescriptorSets)(screen->dev, num_wds, wds, 0, NULL);
1463 }
1464 
1465 static void
1466 zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg);
1467 
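/* number of consecutive lookup misses after which a program's cache for a given set is abandoned
 * in favor of lazy (uncached) descriptor updates
 */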
1468 #define MAX_CACHE_MISSES 50
1469 
1470 void
1471 zink_descriptors_update(struct zink_context *ctx, bool is_compute)
1472 {
1473    struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
1474 
1475    if (ctx->dd->pg[is_compute] != pg) {
1476       for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
1477          if (pg->dd->real_binding_usage & BITFIELD_BIT(h))
1478             ctx->dd->changed[is_compute][h] = true;
1479          ctx->dd->descriptor_states[is_compute].valid[h] = false;
1480          if (!is_compute) {
1481             for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++)
1482                ctx->dd->gfx_descriptor_states[i].valid[h] = false;
1483          }
1484       }
1485    }
1486    zink_context_update_descriptor_states(ctx, pg);
1487    bool cache_hit;
1488    VkDescriptorSet desc_set = VK_NULL_HANDLE;
1489    struct zink_descriptor_set *zds = NULL;
1490 
1491    struct zink_batch *batch = &ctx->batch;
1492    VkPipelineBindPoint bp = is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS;
1493 
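   /* the push set is bound first as descriptor set 0; the cached per-type sets follow at set h + 1 */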
1494    {
1495       uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
1496       unsigned dynamic_offset_idx = 0;
1497 
1498       /* push set is indexed in vulkan as 0 but isn't in the general pool array */
1499       ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] |= ctx->dd->pg[is_compute] != pg;
1500       if (pg->dd->push_usage) {
1501          if (pg->dd->fbfetch) {
1502             /* fbfetch is not cacheable: grab a lazy set because it's faster */
1503             cache_hit = false;
1504             desc_set = zink_descriptors_alloc_lazy_push(ctx);
1505          } else {
1506             zds = zink_descriptor_set_get(ctx, ZINK_DESCRIPTOR_TYPES, is_compute, &cache_hit);
1507             desc_set = zds ? zds->desc_set : VK_NULL_HANDLE;
1508          }
1509       } else {
1510          cache_hit = false;
1511       }
1512       ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] = false;
1513       if (desc_set) {
1514          if (pg->dd->push_usage) // push set
1515             dynamic_offset_idx = update_push_ubo_descriptors(ctx, zds, desc_set,
1516                                                              is_compute, cache_hit, dynamic_offsets);
1517          VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
1518                                  pg->layout, 0, 1, &desc_set,
1519                                  dynamic_offset_idx, dynamic_offsets);
1520       }
1521    }
1522 
1523    {
1524       for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
1525          if (pdd_cached(pg)->cache_misses[h] < MAX_CACHE_MISSES) {
1526             if (pg->dsl[h + 1]) {
1527                /* null set has null pool */
1528                if (pdd_cached(pg)->pool[h]) {
1529                   zds = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit);
1530                   if (cache_hit) {
1531                      pdd_cached(pg)->cache_misses[h] = 0;
1532                   } else {
1533                      if (++pdd_cached(pg)->cache_misses[h] == MAX_CACHE_MISSES) {
1534 #ifdef PRINT_DEBUG
1535                         const char *set_names[] = {
1536                            "UBO",
1537                            "TEXTURES",
1538                            "SSBO",
1539                            "IMAGES",
1540                         };
1541                         debug_printf("zink: descriptor cache exploded for prog %p set %s: getting lazy (not a bug, just letting you know)\n", pg, set_names[h]);
1542 #endif
1543                      }
1544                   }
1545                } else
1546                   zds = NULL;
1547                if (zds) {
1548                   desc_set = zds->desc_set;
1549                   update_descriptors_internal(ctx, h, zds, pg, cache_hit);
1550 
1551                   VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
1552                                                pg->layout, h + 1, 1, &desc_set,
1553                                                0, NULL);
1554                   if (pdd_cached(pg)->cache_misses[h] == MAX_CACHE_MISSES)
1555                      zink_descriptor_pool_reference(ctx, &pdd_cached(pg)->pool[h], NULL);
1556                }
1557             }
1558          } else {
1559             zink_descriptors_update_lazy_masked(ctx, is_compute, BITFIELD_BIT(h), 0);
1560          }
1561          ctx->dd->changed[is_compute][h] = false;
1562       }
1563    }
1564    ctx->dd->pg[is_compute] = pg;
1565 
1566    if (pg->dd->bindless && unlikely(!ctx->dd->bindless_bound)) {
1567       VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
1568                                    pg->layout, ZINK_DESCRIPTOR_BINDLESS, 1, &ctx->dd->bindless_set,
1569                                    0, NULL);
1570       ctx->dd->bindless_bound = true;
1571    }
1572 }
1573 
1574 void
1575 zink_batch_descriptor_deinit(struct zink_screen *screen, struct zink_batch_state *bs)
1576 {
1577    if (!bs->dd)
1578       return;
1579    _mesa_set_destroy(bs->dd->desc_sets, NULL);
1580    zink_batch_descriptor_deinit_lazy(screen, bs);
1581 }
1582 
1583 void
1584 zink_batch_descriptor_reset(struct zink_screen *screen, struct zink_batch_state *bs)
1585 {
1586    set_foreach(bs->dd->desc_sets, entry) {
1587       struct zink_descriptor_set *zds = (void*)entry->key;
1588       zink_batch_usage_unset(&zds->batch_uses, bs);
1589       /* reset descriptor pools when no bs is using this program to avoid
1590        * having some inactive program hogging a billion descriptors
1591        */
1592       pipe_reference(&zds->reference, NULL);
1593       zink_descriptor_set_recycle(zds);
1594       if (zds->reference.count == 1) {
1595          struct zink_descriptor_pool *pool = zds->pool;
1596          zink_descriptor_pool_reference(bs->ctx, &pool, NULL);
1597       }
1598       _mesa_set_remove(bs->dd->desc_sets, entry);
1599    }
1600    zink_batch_descriptor_reset_lazy(screen, bs);
1601 }
1602 
1603 bool
1604 zink_batch_descriptor_init(struct zink_screen *screen, struct zink_batch_state *bs)
1605 {
1606    if (!zink_batch_descriptor_init_lazy(screen, bs))
1607       return false;
1608    bs->dd->desc_sets = _mesa_pointer_set_create(bs);
1609    return !!bs->dd->desc_sets;
1610 }
1611 
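/* the calc_descriptor_state_hash_* helpers xxhash the currently-bound descriptor state
 * (resource object pointer plus whichever binding parameters affect the descriptor) to build
 * the key used for descriptor set cache lookups
 */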
1612 static uint32_t
1613 calc_descriptor_state_hash_ubo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash, bool need_offset)
1614 {
1615    for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_UBO][i].size; k++) {
1616       struct zink_resource *res = ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][shader][idx + k];
1617       struct zink_resource_object *obj = res ? res->obj : NULL;
1618       hash = XXH32(&obj, sizeof(void*), hash);
1619       void *hash_data = &ctx->di.ubos[shader][idx + k].range;
1620       size_t data_size = sizeof(unsigned);
1621       hash = XXH32(hash_data, data_size, hash);
1622       if (need_offset)
1623          hash = XXH32(&ctx->di.ubos[shader][idx + k].offset, sizeof(unsigned), hash);
1624    }
1625    return hash;
1626 }
1627 
1628 static uint32_t
1629 calc_descriptor_state_hash_ssbo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
1630 {
1631    for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_SSBO][i].size; k++) {
1632       struct zink_resource *res = ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_SSBO][shader][idx + k];
1633       struct zink_resource_object *obj = res ? res->obj : NULL;
1634       hash = XXH32(&obj, sizeof(void*), hash);
1635       if (obj) {
1636          struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][idx + k];
1637          hash = XXH32(&ssbo->buffer_offset, sizeof(ssbo->buffer_offset), hash);
1638          hash = XXH32(&ssbo->buffer_size, sizeof(ssbo->buffer_size), hash);
1639          /* compacted sets need a way to differentiate between a buffer bound as a ubo vs ssbo */
1640          if (zink_screen(ctx->base.screen)->compact_descriptors) {
1641             uint32_t writable = ctx->writable_ssbos[shader] & BITFIELD_BIT(idx + k);
1642             hash = XXH32(&writable, sizeof(writable), hash);
1643          }
1644       }
1645    }
1646    return hash;
1647 }
1648 
1649 static uint32_t
1650 calc_descriptor_state_hash_sampler(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
1651 {
1652    for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW][i].size; k++) {
1653       struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][idx + k]);
1654       bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, i);
1655       ctx->di.sampler_surfaces[shader][idx + k].is_buffer = is_buffer;
1656       uint32_t val = zink_get_sampler_view_hash(ctx, sampler_view, is_buffer);
1657       hash = XXH32(&val, sizeof(uint32_t), hash);
1658       if (is_buffer)
1659          continue;
1660 
1661       hash = XXH32(&ctx->di.textures[shader][idx + k].imageLayout, sizeof(VkImageLayout), hash);
1662 
1663       struct zink_sampler_state *sampler_state = ctx->sampler_states[shader][idx + k];
1664 
1665       if (sampler_state)
1666          hash = XXH32(&sampler_state->hash, sizeof(uint32_t), hash);
1667    }
1668    return hash;
1669 }
1670 
1671 static uint32_t
1672 calc_descriptor_state_hash_image(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
1673 {
1674    for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_IMAGE][i].size; k++) {
1675       bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_IMAGE, i);
1676       uint32_t val = zink_get_image_view_hash(ctx, &ctx->image_views[shader][idx + k], is_buffer);
1677       ctx->di.image_surfaces[shader][idx + k].is_buffer = is_buffer;
1678       hash = XXH32(&val, sizeof(uint32_t), hash);
1679    }
1680    return hash;
1681 }
1682 
1683 static uint32_t
1684 update_descriptor_stage_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
1685 {
1686    struct zink_shader *zs = shader == PIPE_SHADER_COMPUTE ? ctx->compute_stage : ctx->gfx_stages[shader];
1687 
1688    uint32_t hash = 0;
1689    for (int i = 0; i < zs->num_bindings[type]; i++) {
1690       /* skip push set members */
1691       if (zs->bindings[type][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1692          continue;
1693 
1694       int idx = zs->bindings[type][i].index;
1695       switch (type) {
1696       case ZINK_DESCRIPTOR_TYPE_UBO:
1697          hash = calc_descriptor_state_hash_ubo(ctx, zs, shader, i, idx, hash, true);
1698          break;
1699       case ZINK_DESCRIPTOR_TYPE_SSBO:
1700          hash = calc_descriptor_state_hash_ssbo(ctx, zs, shader, i, idx, hash);
1701          break;
1702       case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1703          hash = calc_descriptor_state_hash_sampler(ctx, zs, shader, i, idx, hash);
1704          break;
1705       case ZINK_DESCRIPTOR_TYPE_IMAGE:
1706          hash = calc_descriptor_state_hash_image(ctx, zs, shader, i, idx, hash);
1707          break;
1708       default:
1709          unreachable("unknown descriptor type");
1710       }
1711    }
1712    return hash;
1713 }
1714 
1715 static void
1716 update_descriptor_state(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute)
1717 {
1718    /* we shouldn't be calling this if we don't have to */
1719    assert(!ctx->dd->descriptor_states[is_compute].valid[type]);
1720    bool has_any_usage = false;
1721 
1722    if (is_compute) {
1723       /* just update compute state */
1724       bool has_usage = zink_program_get_descriptor_usage(ctx, PIPE_SHADER_COMPUTE, type);
1725       if (has_usage)
1726          ctx->dd->descriptor_states[is_compute].state[type] = update_descriptor_stage_state(ctx, PIPE_SHADER_COMPUTE, type);
1727       else
1728          ctx->dd->descriptor_states[is_compute].state[type] = 0;
1729       has_any_usage = has_usage;
1730    } else {
1731       /* update all gfx states */
1732       bool first = true;
1733       for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
1734          bool has_usage = false;
1735          /* this is the incremental update for the shader stage */
1736          if (!ctx->dd->gfx_descriptor_states[i].valid[type]) {
1737             ctx->dd->gfx_descriptor_states[i].state[type] = 0;
1738             if (ctx->gfx_stages[i]) {
1739                has_usage = zink_program_get_descriptor_usage(ctx, i, type);
1740                if (has_usage)
1741                   ctx->dd->gfx_descriptor_states[i].state[type] = update_descriptor_stage_state(ctx, i, type);
1742                ctx->dd->gfx_descriptor_states[i].valid[type] = has_usage;
1743             }
1744          }
1745          if (ctx->dd->gfx_descriptor_states[i].valid[type]) {
1746             /* this is the overall state update for the descriptor set hash */
1747             if (first) {
1748                /* no need to double hash the first state */
1749                ctx->dd->descriptor_states[is_compute].state[type] = ctx->dd->gfx_descriptor_states[i].state[type];
1750                first = false;
1751             } else {
1752                ctx->dd->descriptor_states[is_compute].state[type] ^= ctx->dd->gfx_descriptor_states[i].state[type];
1753             }
1754          }
1755          has_any_usage |= has_usage;
1756       }
1757    }
1758    ctx->dd->descriptor_states[is_compute].valid[type] = has_any_usage;
1759 }
1760 
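/* recompute the push-set and per-type hashes that drive set lookup; the push-set UBO hash excludes
 * the buffer offset (need_offset = false) because that offset is supplied separately as a dynamic
 * offset at bind time and so doesn't affect the set's identity
 */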
1761 static void
1762 zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg)
1763 {
1764    struct zink_screen *screen = zink_screen(ctx->base.screen);
1765    if (pg->dd->push_usage && (!ctx->dd->push_valid[pg->is_compute] ||
1766                                            pg->dd->push_usage != ctx->dd->last_push_usage[pg->is_compute])) {
1767       uint32_t hash = 0;
1768       if (pg->is_compute) {
1769           hash = calc_descriptor_state_hash_ubo(ctx, ctx->compute_stage, PIPE_SHADER_COMPUTE, 0, 0, 0, false);
1770       } else {
1771          bool first = true;
1772          u_foreach_bit(stage, pg->dd->push_usage) {
1773             if (!ctx->dd->gfx_push_valid[stage]) {
1774                ctx->dd->gfx_push_state[stage] = calc_descriptor_state_hash_ubo(ctx, ctx->gfx_stages[stage], stage, 0, 0, 0, false);
1775                ctx->dd->gfx_push_valid[stage] = true;
1776             }
1777             if (first)
1778                hash = ctx->dd->gfx_push_state[stage];
1779             else
1780                hash ^= ctx->dd->gfx_push_state[stage];
1781             first = false;
1782          }
1783       }
1784       ctx->dd->changed[pg->is_compute][ZINK_DESCRIPTOR_TYPES] |= ctx->dd->push_state[pg->is_compute] != hash;
1785       ctx->dd->changed[pg->is_compute][ZINK_DESCRIPTOR_TYPES] |= pg->dd->push_usage != ctx->dd->last_push_usage[pg->is_compute];
1786       ctx->dd->push_state[pg->is_compute] = hash;
1787       ctx->dd->push_valid[pg->is_compute] = true;
1788       ctx->dd->last_push_usage[pg->is_compute] = pg->dd->push_usage;
1789    }
1790    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1791       if (pdd_cached(pg)->pool[screen->desc_set_id[i] - 1] && pdd_cached(pg)->cache_misses[i] < MAX_CACHE_MISSES &&
1792           ctx->dd->changed[pg->is_compute][i] &&
1793           !ctx->dd->descriptor_states[pg->is_compute].valid[i])
1794          update_descriptor_state(ctx, i, pg->is_compute);
1795    }
1796 
1797    if (!screen->compact_descriptors)
1798       return;
1799 
1800    for (unsigned n = 0; n < 2; n++) {
1801       ctx->dd->compact_descriptor_states[pg->is_compute].valid[n] = ctx->dd->descriptor_states[pg->is_compute].valid[n] |
1802                                                                     ctx->dd->descriptor_states[pg->is_compute].valid[n + ZINK_DESCRIPTOR_COMPACT];
1803       if (ctx->dd->compact_descriptor_states[pg->is_compute].valid[n]) {
1804          if (pg->is_compute) {
1805             ctx->dd->compact_descriptor_states[pg->is_compute].state[n] = ctx->dd->descriptor_states[pg->is_compute].state[n] ^
1806                                                                           ctx->dd->descriptor_states[pg->is_compute].state[n + ZINK_DESCRIPTOR_COMPACT];
1807          } else {
1808             uint32_t hash = 0;
1809             bool first = true;
1810             for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
1811                ctx->dd->compact_gfx_descriptor_states[i].valid[n] = ctx->dd->gfx_descriptor_states[i].valid[n] |
1812                                                                     ctx->dd->gfx_descriptor_states[i].valid[n + ZINK_DESCRIPTOR_COMPACT];
1813                if (ctx->dd->compact_gfx_descriptor_states[i].valid[n]) {
1814                   ctx->dd->compact_gfx_descriptor_states[i].state[n] = ctx->dd->gfx_descriptor_states[i].state[n] ^
1815                                                                        ctx->dd->gfx_descriptor_states[i].state[n + ZINK_DESCRIPTOR_COMPACT];
1816                   if (first)
1817                      hash = ctx->dd->compact_gfx_descriptor_states[i].state[n];
1818                   else
1819                      hash ^= ctx->dd->compact_gfx_descriptor_states[i].state[n];
1820                   first = false;
1821                } else {
1822                   ctx->dd->compact_gfx_descriptor_states[i].state[n] = 0;
1823                }
1824             }
1825             ctx->dd->compact_descriptor_states[pg->is_compute].state[n] = hash;
1826          }
1827       } else {
1828          ctx->dd->compact_descriptor_states[pg->is_compute].state[n] = 0;
1829       }
1830    }
1831 }
1832 
1833 void
1834 zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned start, unsigned count)
1835 {
1836    zink_context_invalidate_descriptor_state_lazy(ctx, shader, type, start, count);
1837    if (type == ZINK_DESCRIPTOR_TYPE_UBO && !start) {
1838       /* ubo 0 is the push set */
1839       ctx->dd->push_state[shader == PIPE_SHADER_COMPUTE] = 0;
1840       ctx->dd->push_valid[shader == PIPE_SHADER_COMPUTE] = false;
1841       if (shader != PIPE_SHADER_COMPUTE) {
1842          ctx->dd->gfx_push_state[shader] = 0;
1843          ctx->dd->gfx_push_valid[shader] = false;
1844       }
1845       ctx->dd->changed[shader == PIPE_SHADER_COMPUTE][ZINK_DESCRIPTOR_TYPES] = true;
1846       return;
1847    }
1848    if (shader != PIPE_SHADER_COMPUTE) {
1849       ctx->dd->gfx_descriptor_states[shader].valid[type] = false;
1850       ctx->dd->gfx_descriptor_states[shader].state[type] = 0;
1851    }
1852    ctx->dd->descriptor_states[shader == PIPE_SHADER_COMPUTE].valid[type] = false;
1853    ctx->dd->descriptor_states[shader == PIPE_SHADER_COMPUTE].state[type] = 0;
1854    ctx->dd->changed[shader == PIPE_SHADER_COMPUTE][type] = true;
1855 }
1856 
1857 bool
1858 zink_descriptors_init(struct zink_context *ctx)
1859 {
1860    zink_descriptors_init_lazy(ctx);
1861    if (!ctx->dd)
1862       return false;
1863    return zink_descriptor_pool_init(ctx);
1864 }
1865 
1866 void
1867 zink_descriptors_deinit(struct zink_context *ctx)
1868 {
1869    zink_descriptor_pool_deinit(ctx);
1870    zink_descriptors_deinit_lazy(ctx);
1871 }
1872 
1873 bool
1874 zink_descriptor_layouts_init(struct zink_context *ctx)
1875 {
1876    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1877       if (!_mesa_hash_table_init(&ctx->desc_set_layouts[i], ctx, hash_descriptor_layout, equals_descriptor_layout))
1878          return false;
1879       if (!_mesa_set_init(&ctx->desc_pool_keys[i], ctx, hash_descriptor_pool_key, equals_descriptor_pool_key))
1880          return false;
1881    }
1882    return true;
1883 }
1884 
1885 void
1886 zink_descriptor_layouts_deinit(struct zink_context *ctx)
1887 {
1888    struct zink_screen *screen = zink_screen(ctx->base.screen);
1889    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1890       hash_table_foreach(&ctx->desc_set_layouts[i], he) {
1891          struct zink_descriptor_layout *layout = he->data;
1892          VKSCR(DestroyDescriptorSetLayout)(screen->dev, layout->layout, NULL);
1893          ralloc_free(layout);
1894          _mesa_hash_table_remove(&ctx->desc_set_layouts[i], he);
1895       }
1896    }
1897 }
1898 
1899 
1900 void
1901 zink_descriptor_util_init_fbfetch(struct zink_context *ctx)
1902 {
1903    if (ctx->dd->has_fbfetch)
1904       return;
1905 
1906    struct zink_screen *screen = zink_screen(ctx->base.screen);
1907    VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->push_dsl[0]->layout, NULL);
1908    //don't free these now, let ralloc free on teardown to avoid invalid access
1909    //ralloc_free(ctx->dd->push_dsl[0]);
1910    //ralloc_free(ctx->dd->push_layout_keys[0]);
1911    ctx->dd->push_dsl[0] = create_gfx_layout(ctx, &ctx->dd->push_layout_keys[0], true);
1912    ctx->dd->has_fbfetch = true;
1913    if (zink_descriptor_mode != ZINK_DESCRIPTOR_MODE_LAZY)
1914       zink_descriptor_pool_init(ctx);
1915 }
1916 
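/* bindless layout: binding 0 = sampled images, 1 = sampled (uniform texel) buffers,
 * 2 = storage images, 3 = storage (texel) buffers; image descriptors use the even bindings and
 * buffer descriptors the odd ones, matching the dstBinding math in zink_descriptors_update_bindless()
 */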
1917 ALWAYS_INLINE static VkDescriptorType
1918 type_from_bindless_index(unsigned idx)
1919 {
1920    switch (idx) {
1921    case 0: return VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1922    case 1: return VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
1923    case 2: return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
1924    case 3: return VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1925    default:
1926       unreachable("unknown index");
1927    }
1928 }
1929 
1930 void
1931 zink_descriptors_init_bindless(struct zink_context *ctx)
1932 {
1933    if (ctx->dd->bindless_set)
1934       return;
1935 
1936    struct zink_screen *screen = zink_screen(ctx->base.screen);
1937    VkDescriptorSetLayoutBinding bindings[4];
1938    const unsigned num_bindings = 4;
1939    VkDescriptorSetLayoutCreateInfo dcslci = {0};
1940    dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
1941    dcslci.pNext = NULL;
1942    VkDescriptorSetLayoutBindingFlagsCreateInfo fci = {0};
1943    VkDescriptorBindingFlags flags[4];
1944    dcslci.pNext = &fci;
1945    dcslci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
1946    fci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
1947    fci.bindingCount = num_bindings;
1948    fci.pBindingFlags = flags;
1949    for (unsigned i = 0; i < num_bindings; i++) {
1950       flags[i] = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT | VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT;
1951    }
1952    for (unsigned i = 0; i < num_bindings; i++) {
1953       bindings[i].binding = i;
1954       bindings[i].descriptorType = type_from_bindless_index(i);
1955       bindings[i].descriptorCount = ZINK_MAX_BINDLESS_HANDLES;
1956       bindings[i].stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_COMPUTE_BIT;
1957       bindings[i].pImmutableSamplers = NULL;
1958    }
1959 
1960    dcslci.bindingCount = num_bindings;
1961    dcslci.pBindings = bindings;
1962    VkResult result = VKSCR(CreateDescriptorSetLayout)(screen->dev, &dcslci, 0, &ctx->dd->bindless_layout);
1963    if (result != VK_SUCCESS) {
1964       mesa_loge("ZINK: vkCreateDescriptorSetLayout failed (%s)", vk_Result_to_str(result));
1965       return;
1966    }
1967 
1968    VkDescriptorPoolCreateInfo dpci = {0};
1969    VkDescriptorPoolSize sizes[4];
1970    for (unsigned i = 0; i < 4; i++) {
1971       sizes[i].type = type_from_bindless_index(i);
1972       sizes[i].descriptorCount = ZINK_MAX_BINDLESS_HANDLES;
1973    }
1974    dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
1975    dpci.pPoolSizes = sizes;
1976    dpci.poolSizeCount = 4;
1977    dpci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT;
1978    dpci.maxSets = 1;
1979    result = VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &ctx->dd->bindless_pool);
1980    if (result != VK_SUCCESS) {
1981       mesa_loge("ZINK: vkCreateDescriptorPool failed (%s)", vk_Result_to_str(result));
1982       return;
1983    }
1984 
1985    zink_descriptor_util_alloc_sets(screen, ctx->dd->bindless_layout, ctx->dd->bindless_pool, &ctx->dd->bindless_set, 1);
1986 }
1987 
1988 void
1989 zink_descriptors_deinit_bindless(struct zink_context *ctx)
1990 {
1991    struct zink_screen *screen = zink_screen(ctx->base.screen);
1992    if (ctx->dd->bindless_layout)
1993       VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->bindless_layout, NULL);
1994    if (ctx->dd->bindless_pool)
1995       VKSCR(DestroyDescriptorPool)(screen->dev, ctx->dd->bindless_pool, NULL);
1996 }
1997 
1998 void
1999 zink_descriptors_update_bindless(struct zink_context *ctx)
2000 {
2001    struct zink_screen *screen = zink_screen(ctx->base.screen);
2002    for (unsigned i = 0; i < 2; i++) {
2003       if (!ctx->di.bindless_dirty[i])
2004          continue;
2005       while (util_dynarray_contains(&ctx->di.bindless[i].updates, uint32_t)) {
2006          uint32_t handle = util_dynarray_pop(&ctx->di.bindless[i].updates, uint32_t);
2007          bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
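         /* buffer handles are allocated at an offset of ZINK_MAX_BINDLESS_HANDLES, so subtract it
          * to get the descriptor array element; image handles index the array directly
          */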
2008          VkWriteDescriptorSet wd;
2009          wd.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
2010          wd.pNext = NULL;
2011          wd.dstSet = ctx->dd->bindless_set;
2012          wd.dstBinding = is_buffer ? i * 2 + 1 : i * 2;
2013          wd.dstArrayElement = is_buffer ? handle - ZINK_MAX_BINDLESS_HANDLES : handle;
2014          wd.descriptorCount = 1;
2015          wd.descriptorType = type_from_bindless_index(wd.dstBinding);
2016          if (is_buffer)
2017             wd.pTexelBufferView = &ctx->di.bindless[i].buffer_infos[wd.dstArrayElement];
2018          else
2019             wd.pImageInfo = &ctx->di.bindless[i].img_infos[handle];
2020          VKSCR(UpdateDescriptorSets)(screen->dev, 1, &wd, 0, NULL);
2021       }
2022    }
2023    ctx->di.any_bindless_dirty = 0;
2024 }
2025