1 /*
2 * Copyright © 2020 Mike Blumenkrantz
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
25 */
26
27 #include "tgsi/tgsi_from_mesa.h"
28
29
30
31 #include "zink_context.h"
32 #include "zink_descriptors.h"
33 #include "zink_program.h"
34 #include "zink_resource.h"
35 #include "zink_screen.h"
36
37 #define XXH_INLINE_ALL
38 #include "util/xxhash.h"
39
40
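/* A cached descriptor pool, one per (descriptor type, layout, pool-size) key.
 * Sets handed out from it are tracked in two hash tables keyed by the
 * descriptor state hash: desc_sets holds sets currently in use and
 * free_desc_sets holds recycled sets, while alloc_desc_sets is an array of
 * freshly allocated, never-used sets. All of them are guarded by mtx.
 */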
41 struct zink_descriptor_pool {
42 struct pipe_reference reference;
43 enum zink_descriptor_type type;
44 struct hash_table *desc_sets;
45 struct hash_table *free_desc_sets;
46 struct util_dynarray alloc_desc_sets;
47 VkDescriptorPool descpool;
48 struct zink_descriptor_pool_key key;
49 unsigned num_resources;
50 unsigned num_sets_allocated;
51 simple_mtx_t mtx;
52 };
53
54 struct zink_descriptor_set {
55 struct zink_descriptor_pool *pool;
56 struct pipe_reference reference; //incremented for batch usage
57 VkDescriptorSet desc_set;
58 uint32_t hash;
59 bool invalid;
60 bool punted;
61 bool recycled;
62 struct zink_descriptor_state_key key;
63 struct zink_batch_usage *batch_uses;
64 #ifndef NDEBUG
65 /* for extra debug asserts */
66 unsigned num_resources;
67 #endif
68 union {
69 struct zink_resource_object **res_objs;
70 struct {
71 struct zink_descriptor_surface *surfaces;
72 struct zink_sampler_state **sampler_states;
73 };
74 };
75 };
76
77 union zink_program_descriptor_refs {
78 struct zink_resource **res;
79 struct zink_descriptor_surface *dsurf;
80 struct {
81 struct zink_descriptor_surface *dsurf;
82 struct zink_sampler_state **sampler_state;
83 } sampler;
84 };
85
86 struct zink_program_descriptor_data_cached {
87 struct zink_program_descriptor_data base;
88 struct zink_descriptor_pool *pool[ZINK_DESCRIPTOR_TYPES];
89 struct zink_descriptor_set *last_set[ZINK_DESCRIPTOR_TYPES];
90 unsigned num_refs[ZINK_DESCRIPTOR_TYPES];
91 union zink_program_descriptor_refs *refs[ZINK_DESCRIPTOR_TYPES];
92 unsigned cache_misses[ZINK_DESCRIPTOR_TYPES];
93 };
94
95
96 static inline struct zink_program_descriptor_data_cached *
97 pdd_cached(struct zink_program *pg)
98 {
99 return (struct zink_program_descriptor_data_cached*)pg->dd;
100 }
101
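/* Track a descriptor set in the given batch: returns false if the batch
 * already uses the set (or usage tracking could not be added); otherwise a
 * reference is taken and the batch usage recorded so the set outlives the
 * batch's execution.
 */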
102 static bool
103 batch_add_desc_set(struct zink_batch *batch, struct zink_descriptor_set *zds)
104 {
105 if (zink_batch_usage_matches(zds->batch_uses, batch->state) ||
106 !batch_ptr_add_usage(batch, batch->state->dd->desc_sets, zds))
107 return false;
108 pipe_reference(NULL, &zds->reference);
109 zink_batch_usage_set(&zds->batch_uses, batch->state);
110 return true;
111 }
112
113 static void
114 debug_describe_zink_descriptor_pool(char *buf, const struct zink_descriptor_pool *ptr)
115 {
116 sprintf(buf, "zink_descriptor_pool");
117 }
118
119 static inline uint32_t
120 get_sampler_view_hash(const struct zink_sampler_view *sampler_view)
121 {
122 if (!sampler_view)
123 return 0;
124 return sampler_view->base.target == PIPE_BUFFER ?
125 sampler_view->buffer_view->hash : sampler_view->image_view->hash;
126 }
127
128 static inline uint32_t
129 get_image_view_hash(const struct zink_image_view *image_view)
130 {
131 if (!image_view || !image_view->base.resource)
132 return 0;
133 return image_view->base.resource->target == PIPE_BUFFER ?
134 image_view->buffer_view->hash : image_view->surface->hash;
135 }
136
137 uint32_t
138 zink_get_sampler_view_hash(struct zink_context *ctx, struct zink_sampler_view *sampler_view, bool is_buffer)
139 {
140 return get_sampler_view_hash(sampler_view) ? get_sampler_view_hash(sampler_view) :
141 (is_buffer ? zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view :
142 zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
143 }
144
145 uint32_t
146 zink_get_image_view_hash(struct zink_context *ctx, struct zink_image_view *image_view, bool is_buffer)
147 {
148 return get_image_view_hash(image_view) ? get_image_view_hash(image_view) :
149 (is_buffer ? zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view :
150 zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
151 }
152
153 #ifndef NDEBUG
154 static uint32_t
155 get_descriptor_surface_hash(struct zink_context *ctx, struct zink_descriptor_surface *dsurf)
156 {
157 return dsurf->is_buffer ? (dsurf->bufferview ? dsurf->bufferview->hash : zink_screen(ctx->base.screen)->null_descriptor_hashes.buffer_view) :
158 (dsurf->surface ? dsurf->surface->hash : zink_screen(ctx->base.screen)->null_descriptor_hashes.image_view);
159 }
160 #endif
161
162 static bool
163 desc_state_equal(const void *a, const void *b)
164 {
165 const struct zink_descriptor_state_key *a_k = (void*)a;
166 const struct zink_descriptor_state_key *b_k = (void*)b;
167
168 for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
169 if (a_k->exists[i] != b_k->exists[i])
170 return false;
171 if (a_k->exists[i] && b_k->exists[i] &&
172 a_k->state[i] != b_k->state[i])
173 return false;
174 }
175 return true;
176 }
177
178 static uint32_t
179 desc_state_hash(const void *key)
180 {
181 const struct zink_descriptor_state_key *d_key = (void*)key;
182 uint32_t hash = 0;
183 bool first = true;
184 for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
185 if (d_key->exists[i]) {
186 if (!first)
187 hash = XXH32(&d_key->state[i], sizeof(uint32_t), hash);
188 else
189 hash = d_key->state[i];
190 first = false;
191 }
192 }
193 return hash;
194 }
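/* Illustrative sketch, not part of the original source: with exactly two live
 * stages A and B, the loop above reduces to
 *   hash = key->state[A];
 *   hash = XXH32(&key->state[B], sizeof(uint32_t), hash);
 * i.e. the first live stage seeds the hash and each later live stage is
 * chained in through XXH32.
 */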
195
196 static void
197 pop_desc_set_ref(struct zink_descriptor_set *zds, struct util_dynarray *refs)
198 {
199 size_t size = sizeof(struct zink_descriptor_reference);
200 unsigned num_elements = refs->size / size;
201 for (unsigned i = 0; i < num_elements; i++) {
202 struct zink_descriptor_reference *ref = util_dynarray_element(refs, struct zink_descriptor_reference, i);
203 if (&zds->invalid == ref->invalid) {
204 memcpy(util_dynarray_element(refs, struct zink_descriptor_reference, i),
205 util_dynarray_pop_ptr(refs, struct zink_descriptor_reference), size);
206 break;
207 }
208 }
209 }
210
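/* Mark a set invalid and drop its back-references: UBO/SSBO sets clear their
 * cached resource objects, image and sampler-view sets clear their cached
 * surfaces/buffer views (plus sampler states for sampler views), removing the
 * matching entry from each object's desc_set_refs as they go.
 */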
211 static void
212 descriptor_set_invalidate(struct zink_descriptor_set *zds)
213 {
214 zds->invalid = true;
215 for (unsigned i = 0; i < zds->pool->key.layout->num_descriptors; i++) {
216 switch (zds->pool->type) {
217 case ZINK_DESCRIPTOR_TYPE_UBO:
218 case ZINK_DESCRIPTOR_TYPE_SSBO:
219 if (zds->res_objs[i])
220 pop_desc_set_ref(zds, &zds->res_objs[i]->desc_set_refs.refs);
221 zds->res_objs[i] = NULL;
222 break;
223 case ZINK_DESCRIPTOR_TYPE_IMAGE:
224 if (zds->surfaces[i].is_buffer) {
225 if (zds->surfaces[i].bufferview)
226 pop_desc_set_ref(zds, &zds->surfaces[i].bufferview->desc_set_refs.refs);
227 zds->surfaces[i].bufferview = NULL;
228 } else {
229 if (zds->surfaces[i].surface)
230 pop_desc_set_ref(zds, &zds->surfaces[i].surface->desc_set_refs.refs);
231 zds->surfaces[i].surface = NULL;
232 }
233 break;
234 case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
235 if (zds->surfaces[i].is_buffer) {
236 if (zds->surfaces[i].bufferview)
237 pop_desc_set_ref(zds, &zds->surfaces[i].bufferview->desc_set_refs.refs);
238 zds->surfaces[i].bufferview = NULL;
239 } else {
240 if (zds->surfaces[i].surface)
241 pop_desc_set_ref(zds, &zds->surfaces[i].surface->desc_set_refs.refs);
242 zds->surfaces[i].surface = NULL;
243 }
244 if (zds->sampler_states[i])
245 pop_desc_set_ref(zds, &zds->sampler_states[i]->desc_set_refs.refs);
246 zds->sampler_states[i] = NULL;
247 break;
248 default:
249 break;
250 }
251 }
252 }
253
254 #ifndef NDEBUG
255 static void
256 descriptor_pool_clear(struct hash_table *ht)
257 {
258 _mesa_hash_table_clear(ht, NULL);
259 }
260 #endif
261
262 static void
263 descriptor_pool_free(struct zink_screen *screen, struct zink_descriptor_pool *pool)
264 {
265 if (!pool)
266 return;
267 if (pool->descpool)
268 VKSCR(DestroyDescriptorPool)(screen->dev, pool->descpool, NULL);
269
270 simple_mtx_lock(&pool->mtx);
271 #ifndef NDEBUG
272 if (pool->desc_sets)
273 descriptor_pool_clear(pool->desc_sets);
274 if (pool->free_desc_sets)
275 descriptor_pool_clear(pool->free_desc_sets);
276 #endif
277 if (pool->desc_sets)
278 _mesa_hash_table_destroy(pool->desc_sets, NULL);
279 if (pool->free_desc_sets)
280 _mesa_hash_table_destroy(pool->free_desc_sets, NULL);
281
282 simple_mtx_unlock(&pool->mtx);
283 util_dynarray_fini(&pool->alloc_desc_sets);
284 simple_mtx_destroy(&pool->mtx);
285 ralloc_free(pool);
286 }
287
288 static struct zink_descriptor_pool *
289 descriptor_pool_create(struct zink_screen *screen, enum zink_descriptor_type type,
290 struct zink_descriptor_layout_key *layout_key, VkDescriptorPoolSize *sizes, unsigned num_type_sizes)
291 {
292 struct zink_descriptor_pool *pool = rzalloc(NULL, struct zink_descriptor_pool);
293 if (!pool)
294 return NULL;
295 pipe_reference_init(&pool->reference, 1);
296 pool->type = type;
297 pool->key.layout = layout_key;
298 pool->key.num_type_sizes = num_type_sizes;
299 size_t types_size = num_type_sizes * sizeof(VkDescriptorPoolSize);
300 pool->key.sizes = ralloc_size(pool, types_size);
301 if (!pool->key.sizes) {
302 ralloc_free(pool);
303 return NULL;
304 }
305 memcpy(pool->key.sizes, sizes, types_size);
306 simple_mtx_init(&pool->mtx, mtx_plain);
307 for (unsigned i = 0; i < layout_key->num_descriptors; i++) {
308 pool->num_resources += layout_key->bindings[i].descriptorCount;
309 }
310 pool->desc_sets = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
311 if (!pool->desc_sets)
312 goto fail;
313
314 pool->free_desc_sets = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
315 if (!pool->free_desc_sets)
316 goto fail;
317
318 util_dynarray_init(&pool->alloc_desc_sets, NULL);
319
320 VkDescriptorPoolCreateInfo dpci = {0};
321 dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
322 dpci.pPoolSizes = sizes;
323 dpci.poolSizeCount = num_type_sizes;
324 dpci.flags = 0;
325 dpci.maxSets = ZINK_DEFAULT_MAX_DESCS;
326 if (VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &pool->descpool) != VK_SUCCESS) {
327 debug_printf("vkCreateDescriptorPool failed\n");
328 goto fail;
329 }
330
331 return pool;
332 fail:
333 descriptor_pool_free(screen, pool);
334 return NULL;
335 }
336
337 static VkDescriptorSetLayout
338 descriptor_layout_create(struct zink_screen *screen, enum zink_descriptor_type t, VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings)
339 {
340 VkDescriptorSetLayout dsl;
341 VkDescriptorSetLayoutCreateInfo dcslci = {0};
342 dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
343 dcslci.pNext = NULL;
344 VkDescriptorSetLayoutBindingFlagsCreateInfo fci = {0};
345 VkDescriptorBindingFlags flags[ZINK_MAX_DESCRIPTORS_PER_TYPE];
346 if (screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY) {
347 dcslci.pNext = &fci;
348 if (t == ZINK_DESCRIPTOR_TYPES)
349 dcslci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
350 fci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
351 fci.bindingCount = num_bindings;
352 fci.pBindingFlags = flags;
353 for (unsigned i = 0; i < num_bindings; i++) {
354 flags[i] = 0;
355 }
356 }
357 dcslci.bindingCount = num_bindings;
358 dcslci.pBindings = bindings;
359 VkDescriptorSetLayoutSupport supp;
360 supp.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT;
361 supp.pNext = NULL;
362 supp.supported = VK_FALSE;
363 if (VKSCR(GetDescriptorSetLayoutSupport)) {
364 VKSCR(GetDescriptorSetLayoutSupport)(screen->dev, &dcslci, &supp);
365 if (supp.supported == VK_FALSE) {
366 debug_printf("vkGetDescriptorSetLayoutSupport claims layout is unsupported\n");
367 return VK_NULL_HANDLE;
368 }
369 }
370 if (VKSCR(CreateDescriptorSetLayout)(screen->dev, &dcslci, 0, &dsl) != VK_SUCCESS)
371 debug_printf("vkCreateDescriptorSetLayout failed\n");
372 return dsl;
373 }
374
375 static uint32_t
376 hash_descriptor_layout(const void *key)
377 {
378 uint32_t hash = 0;
379 const struct zink_descriptor_layout_key *k = key;
380 hash = XXH32(&k->num_descriptors, sizeof(unsigned), hash);
381 hash = XXH32(k->bindings, k->num_descriptors * sizeof(VkDescriptorSetLayoutBinding), hash);
382
383 return hash;
384 }
385
386 static bool
387 equals_descriptor_layout(const void *a, const void *b)
388 {
389 const struct zink_descriptor_layout_key *a_k = a;
390 const struct zink_descriptor_layout_key *b_k = b;
391 return a_k->num_descriptors == b_k->num_descriptors &&
392 !memcmp(a_k->bindings, b_k->bindings, a_k->num_descriptors * sizeof(VkDescriptorSetLayoutBinding));
393 }
394
395 static struct zink_descriptor_layout *
396 create_layout(struct zink_context *ctx, enum zink_descriptor_type type,
397 VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings,
398 struct zink_descriptor_layout_key **layout_key)
399 {
400 struct zink_screen *screen = zink_screen(ctx->base.screen);
401 VkDescriptorSetLayout dsl = descriptor_layout_create(screen, type, bindings, MAX2(num_bindings, 1));
402 if (!dsl)
403 return NULL;
404
405 struct zink_descriptor_layout_key *k = ralloc(ctx, struct zink_descriptor_layout_key);
406 k->use_count = 0;
407 k->num_descriptors = num_bindings;
408 size_t bindings_size = MAX2(num_bindings, 1) * sizeof(VkDescriptorSetLayoutBinding);
409 k->bindings = ralloc_size(k, bindings_size);
410 if (!k->bindings) {
411 ralloc_free(k);
412 VKSCR(DestroyDescriptorSetLayout)(screen->dev, dsl, NULL);
413 return NULL;
414 }
415 memcpy(k->bindings, bindings, bindings_size);
416
417 struct zink_descriptor_layout *layout = rzalloc(ctx, struct zink_descriptor_layout);
418 layout->layout = dsl;
419 *layout_key = k;
420 return layout;
421 }
422
423 struct zink_descriptor_layout *
424 zink_descriptor_util_layout_get(struct zink_context *ctx, enum zink_descriptor_type type,
425 VkDescriptorSetLayoutBinding *bindings, unsigned num_bindings,
426 struct zink_descriptor_layout_key **layout_key)
427 {
428 uint32_t hash = 0;
429 struct zink_descriptor_layout_key key = {
430 .num_descriptors = num_bindings,
431 .bindings = bindings,
432 };
433
434 VkDescriptorSetLayoutBinding null_binding;
435 if (!bindings) {
436 null_binding.binding = 0;
437 null_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
438 null_binding.descriptorCount = 1;
439 null_binding.pImmutableSamplers = NULL;
440 null_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
441 VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
442 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
443 key.bindings = &null_binding;
444 }
445
446 if (type != ZINK_DESCRIPTOR_TYPES) {
447 hash = hash_descriptor_layout(&key);
448 struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->desc_set_layouts[type], hash, &key);
449 if (he) {
450 *layout_key = (void*)he->key;
451 return he->data;
452 }
453 }
454
455 struct zink_descriptor_layout *layout = create_layout(ctx, type, bindings ? bindings : &null_binding, num_bindings, layout_key);
456 if (layout && type != ZINK_DESCRIPTOR_TYPES) {
457 _mesa_hash_table_insert_pre_hashed(&ctx->desc_set_layouts[type], hash, *layout_key, layout);
458 }
459 return layout;
460 }
461
462 static void
463 init_push_binding(VkDescriptorSetLayoutBinding *binding, unsigned i, VkDescriptorType type)
464 {
465 binding->binding = tgsi_processor_to_shader_stage(i);
466 binding->descriptorType = type;
467 binding->descriptorCount = 1;
468 binding->stageFlags = zink_shader_stage(i);
469 binding->pImmutableSamplers = NULL;
470 }
471
472 static VkDescriptorType
473 get_push_types(struct zink_screen *screen, enum zink_descriptor_type *dsl_type)
474 {
475 *dsl_type = screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY &&
476 screen->info.have_KHR_push_descriptor ? ZINK_DESCRIPTOR_TYPES : ZINK_DESCRIPTOR_TYPE_UBO;
477 return screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ?
478 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
479 }
480
481 static struct zink_descriptor_layout *
482 create_gfx_layout(struct zink_context *ctx, struct zink_descriptor_layout_key **layout_key, bool fbfetch)
483 {
484 struct zink_screen *screen = zink_screen(ctx->base.screen);
485 VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES];
486 enum zink_descriptor_type dsl_type;
487 VkDescriptorType vktype = get_push_types(screen, &dsl_type);
488 for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++)
489 init_push_binding(&bindings[i], i, vktype);
490 if (fbfetch) {
491 bindings[ZINK_SHADER_COUNT].binding = ZINK_FBFETCH_BINDING;
492 bindings[ZINK_SHADER_COUNT].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
493 bindings[ZINK_SHADER_COUNT].descriptorCount = 1;
494 bindings[ZINK_SHADER_COUNT].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
495 bindings[ZINK_SHADER_COUNT].pImmutableSamplers = NULL;
496 }
497 return create_layout(ctx, dsl_type, bindings, fbfetch ? ARRAY_SIZE(bindings) : ARRAY_SIZE(bindings) - 1, layout_key);
498 }
499
500 bool
501 zink_descriptor_util_push_layouts_get(struct zink_context *ctx, struct zink_descriptor_layout **dsls, struct zink_descriptor_layout_key **layout_keys)
502 {
503 struct zink_screen *screen = zink_screen(ctx->base.screen);
504 VkDescriptorSetLayoutBinding compute_binding;
505 enum zink_descriptor_type dsl_type;
506 VkDescriptorType vktype = get_push_types(screen, &dsl_type);
507 init_push_binding(&compute_binding, PIPE_SHADER_COMPUTE, vktype);
508 dsls[0] = create_gfx_layout(ctx, &layout_keys[0], false);
509 dsls[1] = create_layout(ctx, dsl_type, &compute_binding, 1, &layout_keys[1]);
510 return dsls[0] && dsls[1];
511 }
512
513 void
514 zink_descriptor_util_init_null_set(struct zink_context *ctx, VkDescriptorSet desc_set)
515 {
516 struct zink_screen *screen = zink_screen(ctx->base.screen);
517 VkDescriptorBufferInfo push_info;
518 VkWriteDescriptorSet push_wd;
519 push_wd.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
520 push_wd.pNext = NULL;
521 push_wd.dstBinding = 0;
522 push_wd.dstArrayElement = 0;
523 push_wd.descriptorCount = 1;
524 push_wd.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
525 push_wd.dstSet = desc_set;
526 push_wd.pBufferInfo = &push_info;
527 push_info.buffer = screen->info.rb2_feats.nullDescriptor ?
528 VK_NULL_HANDLE :
529 zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
530 push_info.offset = 0;
531 push_info.range = VK_WHOLE_SIZE;
532 VKSCR(UpdateDescriptorSets)(screen->dev, 1, &push_wd, 0, NULL);
533 }
534
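/* Rough decision tree for the expression below:
 *   bindless resource: GENERAL if it has any image binds, else SHADER_READ_ONLY_OPTIMAL
 *   bound as a storage image for this pipeline type: GENERAL
 *   depth-only or stencil-only aspect: SHADER_READ_ONLY_OPTIMAL (see Vulkan-Docs#1490)
 *   combined depth+stencil aspect: DEPTH_STENCIL_READ_ONLY_OPTIMAL
 *   anything else: SHADER_READ_ONLY_OPTIMAL
 */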
535 VkImageLayout
536 zink_descriptor_util_image_layout_eval(const struct zink_resource *res, bool is_compute)
537 {
538 if (res->bindless[0] || res->bindless[1]) {
539 /* bindless needs most permissive layout */
540 if (res->image_bind_count[0] || res->image_bind_count[1])
541 return VK_IMAGE_LAYOUT_GENERAL;
542 return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
543 }
544 return res->image_bind_count[is_compute] ? VK_IMAGE_LAYOUT_GENERAL :
545 res->aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) ?
546 //Vulkan-Docs#1490
547 //(res->aspect == VK_IMAGE_ASPECT_DEPTH_BIT ? VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL :
548 //res->aspect == VK_IMAGE_ASPECT_STENCIL_BIT ? VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL :
549 (res->aspect == VK_IMAGE_ASPECT_DEPTH_BIT ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
550 res->aspect == VK_IMAGE_ASPECT_STENCIL_BIT ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
551 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) :
552 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
553 }
554
555 static uint32_t
556 hash_descriptor_pool(const void *key)
557 {
558 uint32_t hash = 0;
559 const struct zink_descriptor_pool_key *k = key;
560 hash = XXH32(&k->num_type_sizes, sizeof(unsigned), hash);
561 hash = XXH32(&k->layout, sizeof(k->layout), hash);
562 hash = XXH32(k->sizes, k->num_type_sizes * sizeof(VkDescriptorPoolSize), hash);
563
564 return hash;
565 }
566
567 static bool
568 equals_descriptor_pool(const void *a, const void *b)
569 {
570 const struct zink_descriptor_pool_key *a_k = a;
571 const struct zink_descriptor_pool_key *b_k = b;
572 return a_k->num_type_sizes == b_k->num_type_sizes &&
573 a_k->layout == b_k->layout &&
574 !memcmp(a_k->sizes, b_k->sizes, a_k->num_type_sizes * sizeof(VkDescriptorPoolSize));
575 }
576
577 static struct zink_descriptor_pool *
578 descriptor_pool_get(struct zink_context *ctx, enum zink_descriptor_type type,
579 struct zink_descriptor_layout_key *layout_key, VkDescriptorPoolSize *sizes, unsigned num_type_sizes)
580 {
581 uint32_t hash = 0;
582 if (type != ZINK_DESCRIPTOR_TYPES) {
583 struct zink_descriptor_pool_key key = {
584 .layout = layout_key,
585 .num_type_sizes = num_type_sizes,
586 .sizes = sizes,
587 };
588
589 hash = hash_descriptor_pool(&key);
590 struct hash_entry *he = _mesa_hash_table_search_pre_hashed(ctx->dd->descriptor_pools[type], hash, &key);
591 if (he)
592 return (void*)he->data;
593 }
594 struct zink_descriptor_pool *pool = descriptor_pool_create(zink_screen(ctx->base.screen), type, layout_key, sizes, num_type_sizes);
595 if (type != ZINK_DESCRIPTOR_TYPES)
596 _mesa_hash_table_insert_pre_hashed(ctx->dd->descriptor_pools[type], hash, &pool->key, pool);
597 return pool;
598 }
599
600 static bool
601 get_invalidated_desc_set(struct zink_descriptor_set *zds)
602 {
603 if (!zds->invalid)
604 return false;
605 return p_atomic_read(&zds->reference.count) == 1;
606 }
607
608 bool
609 zink_descriptor_util_alloc_sets(struct zink_screen *screen, VkDescriptorSetLayout dsl, VkDescriptorPool pool, VkDescriptorSet *sets, unsigned num_sets)
610 {
611 VkDescriptorSetAllocateInfo dsai;
612 VkDescriptorSetLayout *layouts = alloca(sizeof(*layouts) * num_sets);
613 memset((void *)&dsai, 0, sizeof(dsai));
614 dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
615 dsai.pNext = NULL;
616 dsai.descriptorPool = pool;
617 dsai.descriptorSetCount = num_sets;
618 for (unsigned i = 0; i < num_sets; i ++)
619 layouts[i] = dsl;
620 dsai.pSetLayouts = layouts;
621
622 if (VKSCR(AllocateDescriptorSets)(screen->dev, &dsai, sets) != VK_SUCCESS) {
623 debug_printf("ZINK: %" PRIu64 " failed to allocate descriptor set :/\n", (uint64_t)dsl);
624 return false;
625 }
626 return true;
627 }
628
629 unsigned
630 zink_descriptor_program_num_sizes(struct zink_program *pg, enum zink_descriptor_type type)
631 {
632 switch (type) {
633 case ZINK_DESCRIPTOR_TYPE_UBO:
634 return 1;
635 case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
636 return !!pg->dd->sizes[ZDS_INDEX_COMBINED_SAMPLER].descriptorCount +
637 !!pg->dd->sizes[ZDS_INDEX_UNIFORM_TEXELS].descriptorCount;
638 case ZINK_DESCRIPTOR_TYPE_SSBO:
639 return 1;
640 case ZINK_DESCRIPTOR_TYPE_IMAGE:
641 return !!pg->dd->sizes[ZDS_INDEX_STORAGE_IMAGE].descriptorCount +
642 !!pg->dd->sizes[ZDS_INDEX_STORAGE_TEXELS].descriptorCount;
643 default: break;
644 }
645 unreachable("unknown type");
646 }
647
648 static struct zink_descriptor_set *
649 allocate_desc_set(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used, bool is_compute)
650 {
651 struct zink_screen *screen = zink_screen(ctx->base.screen);
652 bool push_set = type == ZINK_DESCRIPTOR_TYPES;
653 struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pdd_cached(pg)->pool[type];
654 #define DESC_BUCKET_FACTOR 10
655 unsigned bucket_size = pool->key.layout->num_descriptors ? DESC_BUCKET_FACTOR : 1;
656 if (pool->key.layout->num_descriptors) {
657 for (unsigned desc_factor = DESC_BUCKET_FACTOR; desc_factor < descs_used; desc_factor *= DESC_BUCKET_FACTOR)
658 bucket_size = desc_factor;
659 }
660 /* never grow more than this many at a time */
661 bucket_size = MIN2(bucket_size, ZINK_DEFAULT_MAX_DESCS);
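/* Illustrative, not part of the original source: the bucket is the largest
 * power of DESC_BUCKET_FACTOR strictly below descs_used, with a floor of
 * DESC_BUCKET_FACTOR (e.g. descs_used == 101 yields a bucket of 100) and a
 * ceiling of ZINK_DEFAULT_MAX_DESCS applied by the MIN2 above.
 */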
662 VkDescriptorSet *desc_set = alloca(sizeof(*desc_set) * bucket_size);
663 if (!zink_descriptor_util_alloc_sets(screen, push_set ? ctx->dd->push_dsl[is_compute]->layout : pg->dsl[type + 1], pool->descpool, desc_set, bucket_size))
664 return NULL;
665
666 struct zink_descriptor_set *alloc = ralloc_array(pool, struct zink_descriptor_set, bucket_size);
667 assert(alloc);
668 unsigned num_resources = pool->num_resources;
669 struct zink_resource_object **res_objs = NULL;
670 void **samplers = NULL;
671 struct zink_descriptor_surface *surfaces = NULL;
672 switch (type) {
673 case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
674 samplers = rzalloc_array(pool, void*, num_resources * bucket_size);
675 assert(samplers);
676 FALLTHROUGH;
677 case ZINK_DESCRIPTOR_TYPE_IMAGE:
678 surfaces = rzalloc_array(pool, struct zink_descriptor_surface, num_resources * bucket_size);
679 assert(surfaces);
680 break;
681 default:
682 res_objs = rzalloc_array(pool, struct zink_resource_object*, num_resources * bucket_size);
683 assert(res_objs);
684 break;
685 }
686 for (unsigned i = 0; i < bucket_size; i ++) {
687 struct zink_descriptor_set *zds = &alloc[i];
688 pipe_reference_init(&zds->reference, 1);
689 zds->pool = pool;
690 zds->hash = 0;
691 zds->batch_uses = NULL;
692 zds->invalid = true;
693 zds->punted = zds->recycled = false;
694 #ifndef NDEBUG
695 zds->num_resources = num_resources;
696 #endif
697 switch (type) {
698 case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
699 zds->sampler_states = (struct zink_sampler_state**)&samplers[i * pool->key.layout->num_descriptors];
700 FALLTHROUGH;
701 case ZINK_DESCRIPTOR_TYPE_IMAGE:
702 zds->surfaces = &surfaces[i * pool->key.layout->num_descriptors];
703 break;
704 default:
705 zds->res_objs = (struct zink_resource_object**)&res_objs[i * pool->key.layout->num_descriptors];
706 break;
707 }
708 zds->desc_set = desc_set[i];
709 if (i > 0)
710 util_dynarray_append(&pool->alloc_desc_sets, struct zink_descriptor_set *, zds);
711 }
712 pool->num_sets_allocated += bucket_size;
713 return alloc;
714 }
715
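/* Fill the lookup key for a descriptor set: compute programs only use slot 0,
 * push sets record one state per stage that actually consumes the push
 * descriptor (per push_usage), and all other gfx sets copy each stage's
 * cached descriptor state for the given type.
 */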
716 static void
717 populate_zds_key(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute,
718 struct zink_descriptor_state_key *key, uint32_t push_usage)
719 {
720 if (is_compute) {
721 for (unsigned i = 1; i < ZINK_SHADER_COUNT; i++)
722 key->exists[i] = false;
723 key->exists[0] = true;
724 if (type == ZINK_DESCRIPTOR_TYPES)
725 key->state[0] = ctx->dd->push_state[is_compute];
726 else {
727 assert(ctx->dd->descriptor_states[is_compute].valid[type]);
728 key->state[0] = ctx->dd->descriptor_states[is_compute].state[type];
729 }
730 } else if (type == ZINK_DESCRIPTOR_TYPES) {
731 /* gfx only */
732 for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
733 if (push_usage & BITFIELD_BIT(i)) {
734 key->exists[i] = true;
735 key->state[i] = ctx->dd->gfx_push_state[i];
736 } else
737 key->exists[i] = false;
738 }
739 } else {
740 for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
741 key->exists[i] = ctx->dd->gfx_descriptor_states[i].valid[type];
742 key->state[i] = ctx->dd->gfx_descriptor_states[i].state[type];
743 }
744 }
745 }
746
747 static void
748 punt_invalid_set(struct zink_descriptor_set *zds, struct hash_entry *he)
749 {
750 /* this is no longer usable, so we punt it for now until it gets recycled */
751 assert(!zds->recycled);
752 if (!he)
753 he = _mesa_hash_table_search_pre_hashed(zds->pool->desc_sets, zds->hash, &zds->key);
754 _mesa_hash_table_remove(zds->pool->desc_sets, he);
755 zds->punted = true;
756 }
757
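/* Find (or allocate) a descriptor set matching the current descriptor state.
 * Lookup order: the last-used set, the in-use hash table, the recycled
 * (free) hash table, the array of unused allocations, a scavenge of
 * invalidated free sets, and finally a fresh bucket allocation. cache_hit is
 * only set when the returned set's contents are already valid for the
 * current state.
 */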
758 static struct zink_descriptor_set *
759 zink_descriptor_set_get(struct zink_context *ctx,
760 enum zink_descriptor_type type,
761 bool is_compute,
762 bool *cache_hit)
763 {
764 *cache_hit = false;
765 struct zink_descriptor_set *zds;
766 struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
767 struct zink_batch *batch = &ctx->batch;
768 bool push_set = type == ZINK_DESCRIPTOR_TYPES;
769 struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pdd_cached(pg)->pool[type];
770 unsigned descs_used = 1;
771 assert(type <= ZINK_DESCRIPTOR_TYPES);
772
773 assert(pool->key.layout->num_descriptors);
774 uint32_t hash = push_set ? ctx->dd->push_state[is_compute] :
775 ctx->dd->descriptor_states[is_compute].state[type];
776
777 struct zink_descriptor_set *last_set = push_set ? ctx->dd->last_set[is_compute] : pdd_cached(pg)->last_set[type];
778 /* if the current state hasn't changed since the last time it was used,
779 * it's impossible for this set to not be valid, which means that an
780 * early return here can be done safely and with no locking
781 */
782 if (last_set && ((push_set && !ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES]) ||
783 (!push_set && !ctx->dd->changed[is_compute][type]))) {
784 *cache_hit = true;
785 return last_set;
786 }
787
788 struct zink_descriptor_state_key key;
789 populate_zds_key(ctx, type, is_compute, &key, pg->dd->push_usage);
790
791 simple_mtx_lock(&pool->mtx);
792 if (last_set && last_set->hash == hash && desc_state_equal(&last_set->key, &key)) {
793 zds = last_set;
794 *cache_hit = !zds->invalid;
795 if (zds->recycled) {
796 struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
797 if (he)
798 _mesa_hash_table_remove(pool->free_desc_sets, he);
799 zds->recycled = false;
800 }
801 if (zds->invalid) {
802 if (zink_batch_usage_exists(zds->batch_uses))
803 punt_invalid_set(zds, NULL);
804 else
805 /* this set is guaranteed to be in pool->alloc_desc_sets */
806 goto skip_hash_tables;
807 zds = NULL;
808 }
809 if (zds)
810 goto out;
811 }
812
813 struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, hash, &key);
814 bool recycled = false, punted = false;
815 if (he) {
816 zds = (void*)he->data;
817 if (zds->invalid && zink_batch_usage_exists(zds->batch_uses)) {
818 punt_invalid_set(zds, he);
819 zds = NULL;
820 punted = true;
821 }
822 }
823 if (!he) {
824 he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
825 recycled = true;
826 }
827 if (he && !punted) {
828 zds = (void*)he->data;
829 *cache_hit = !zds->invalid;
830 if (recycled) {
831 /* need to migrate this entry back to the in-use hash */
832 _mesa_hash_table_remove(pool->free_desc_sets, he);
833 goto out;
834 }
835 goto quick_out;
836 }
837 skip_hash_tables:
838 if (util_dynarray_num_elements(&pool->alloc_desc_sets, struct zink_descriptor_set *)) {
839 /* grab one off the allocated array */
840 zds = util_dynarray_pop(&pool->alloc_desc_sets, struct zink_descriptor_set *);
841 goto out;
842 }
843
844 if (_mesa_hash_table_num_entries(pool->free_desc_sets)) {
845 /* try for an invalidated set first */
846 unsigned count = 0;
847 hash_table_foreach(pool->free_desc_sets, he) {
848 struct zink_descriptor_set *tmp = he->data;
849 if ((count++ >= 100 && tmp->reference.count == 1) || get_invalidated_desc_set(he->data)) {
850 zds = tmp;
851 assert(p_atomic_read(&zds->reference.count) == 1);
852 descriptor_set_invalidate(zds);
853 _mesa_hash_table_remove(pool->free_desc_sets, he);
854 goto out;
855 }
856 }
857 }
858
859 assert(pool->num_sets_allocated < ZINK_DEFAULT_MAX_DESCS);
860
861 zds = allocate_desc_set(ctx, pg, type, descs_used, is_compute);
862 out:
863 if (unlikely(pool->num_sets_allocated >= ZINK_DEFAULT_DESC_CLAMP &&
864 _mesa_hash_table_num_entries(pool->free_desc_sets) < ZINK_DEFAULT_MAX_DESCS - ZINK_DEFAULT_DESC_CLAMP))
865 ctx->oom_flush = ctx->oom_stall = true;
866 zds->hash = hash;
867 populate_zds_key(ctx, type, is_compute, &zds->key, pg->dd->push_usage);
868 zds->recycled = false;
869 _mesa_hash_table_insert_pre_hashed(pool->desc_sets, hash, &zds->key, zds);
870 quick_out:
871 zds->punted = zds->invalid = false;
872 batch_add_desc_set(batch, zds);
873 if (push_set)
874 ctx->dd->last_set[is_compute] = zds;
875 else
876 pdd_cached(pg)->last_set[type] = zds;
877 simple_mtx_unlock(&pool->mtx);
878
879 return zds;
880 }
881
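/* Return a no-longer-referenced set to its pool: punted or invalidated sets
 * are wiped and pushed back onto the unused-allocations array, while still
 * valid sets move to the free table where a later lookup can recover them as
 * a cache hit.
 */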
882 void
883 zink_descriptor_set_recycle(struct zink_descriptor_set *zds)
884 {
885 struct zink_descriptor_pool *pool = zds->pool;
886 /* if desc set is still in use by a batch, don't recache */
887 uint32_t refcount = p_atomic_read(&zds->reference.count);
888 if (refcount != 1)
889 return;
890 /* this is a null set */
891 if (!pool->key.layout->num_descriptors)
892 return;
893 simple_mtx_lock(&pool->mtx);
894 if (zds->punted)
895 zds->invalid = true;
896 else {
897 /* if we've previously punted this set, then it won't have a hash or be in either of the tables */
898 struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, zds->hash, &zds->key);
899 if (!he) {
900 /* desc sets can be used multiple times in the same batch */
901 simple_mtx_unlock(&pool->mtx);
902 return;
903 }
904 _mesa_hash_table_remove(pool->desc_sets, he);
905 }
906
907 if (zds->invalid) {
908 descriptor_set_invalidate(zds);
909 util_dynarray_append(&pool->alloc_desc_sets, struct zink_descriptor_set *, zds);
910 } else {
911 zds->recycled = true;
912 _mesa_hash_table_insert_pre_hashed(pool->free_desc_sets, zds->hash, &zds->key, zds);
913 }
914 simple_mtx_unlock(&pool->mtx);
915 }
916
917
918 static void
919 desc_set_ref_add(struct zink_descriptor_set *zds, struct zink_descriptor_refs *refs, void **ref_ptr, void *ptr)
920 {
921 struct zink_descriptor_reference ref = {ref_ptr, &zds->invalid};
922 *ref_ptr = ptr;
923 if (ptr)
924 util_dynarray_append(&refs->refs, struct zink_descriptor_reference, ref);
925 }
926
927 static void
928 zink_descriptor_surface_desc_set_add(struct zink_descriptor_surface *dsurf, struct zink_descriptor_set *zds, unsigned idx)
929 {
930 assert(idx < zds->num_resources);
931 zds->surfaces[idx].is_buffer = dsurf->is_buffer;
932 if (dsurf->is_buffer)
933 desc_set_ref_add(zds, &dsurf->bufferview->desc_set_refs, (void**)&zds->surfaces[idx].bufferview, dsurf->bufferview);
934 else
935 desc_set_ref_add(zds, &dsurf->surface->desc_set_refs, (void**)&zds->surfaces[idx].surface, dsurf->surface);
936 }
937
938 static void
939 zink_image_view_desc_set_add(struct zink_image_view *image_view, struct zink_descriptor_set *zds, unsigned idx, bool is_buffer)
940 {
941 assert(idx < zds->num_resources);
942 if (is_buffer)
943 desc_set_ref_add(zds, &image_view->buffer_view->desc_set_refs, (void**)&zds->surfaces[idx].bufferview, image_view->buffer_view);
944 else
945 desc_set_ref_add(zds, &image_view->surface->desc_set_refs, (void**)&zds->surfaces[idx].surface, image_view->surface);
946 }
947
948 static void
949 zink_sampler_state_desc_set_add(struct zink_sampler_state *sampler_state, struct zink_descriptor_set *zds, unsigned idx)
950 {
951 assert(idx < zds->num_resources);
952 if (sampler_state)
953 desc_set_ref_add(zds, &sampler_state->desc_set_refs, (void**)&zds->sampler_states[idx], sampler_state);
954 else
955 zds->sampler_states[idx] = NULL;
956 }
957
958 static void
959 zink_resource_desc_set_add(struct zink_resource *res, struct zink_descriptor_set *zds, unsigned idx)
960 {
961 assert(idx < zds->num_resources);
962 desc_set_ref_add(zds, res ? &res->obj->desc_set_refs : NULL, (void**)&zds->res_objs[idx], res ? res->obj : NULL);
963 }
964
965 void
966 zink_descriptor_set_refs_clear(struct zink_descriptor_refs *refs, void *ptr)
967 {
968 util_dynarray_foreach(&refs->refs, struct zink_descriptor_reference, ref) {
969 if (*ref->ref == ptr) {
970 *ref->invalid = true;
971 *ref->ref = NULL;
972 }
973 }
974 util_dynarray_fini(&refs->refs);
975 }
976
977 static inline void
978 zink_descriptor_pool_reference(struct zink_screen *screen,
979 struct zink_descriptor_pool **dst,
980 struct zink_descriptor_pool *src)
981 {
982 struct zink_descriptor_pool *old_dst = dst ? *dst : NULL;
983
984 if (pipe_reference_described(old_dst ? &old_dst->reference : NULL, &src->reference,
985 (debug_reference_descriptor)debug_describe_zink_descriptor_pool))
986 descriptor_pool_free(screen, old_dst);
987 if (dst) *dst = src;
988 }
989
990 static void
991 create_descriptor_ref_template(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type)
992 {
993 struct zink_shader **stages;
994 if (pg->is_compute)
995 stages = &((struct zink_compute_program*)pg)->shader;
996 else
997 stages = ((struct zink_gfx_program*)pg)->shaders;
998 unsigned num_shaders = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
999
1000 for (int i = 0; i < num_shaders; i++) {
1001 struct zink_shader *shader = stages[i];
1002 if (!shader)
1003 continue;
1004
1005 for (int j = 0; j < shader->num_bindings[type]; j++) {
1006 int index = shader->bindings[type][j].index;
1007 if (type == ZINK_DESCRIPTOR_TYPE_UBO && !index)
1008 continue;
1009 pdd_cached(pg)->num_refs[type] += shader->bindings[type][j].size;
1010 }
1011 }
1012
1013 pdd_cached(pg)->refs[type] = ralloc_array(pg->dd, union zink_program_descriptor_refs, pdd_cached(pg)->num_refs[type]);
1014 if (!pdd_cached(pg)->refs[type])
1015 return;
1016
1017 unsigned ref_idx = 0;
1018 for (int i = 0; i < num_shaders; i++) {
1019 struct zink_shader *shader = stages[i];
1020 if (!shader)
1021 continue;
1022
1023 enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
1024 for (int j = 0; j < shader->num_bindings[type]; j++) {
1025 int index = shader->bindings[type][j].index;
1026 for (unsigned k = 0; k < shader->bindings[type][j].size; k++) {
1027 switch (type) {
1028 case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1029 pdd_cached(pg)->refs[type][ref_idx].sampler.sampler_state = (struct zink_sampler_state**)&ctx->sampler_states[stage][index + k];
1030 pdd_cached(pg)->refs[type][ref_idx].sampler.dsurf = &ctx->di.sampler_surfaces[stage][index + k];
1031 break;
1032 case ZINK_DESCRIPTOR_TYPE_IMAGE:
1033 pdd_cached(pg)->refs[type][ref_idx].dsurf = &ctx->di.image_surfaces[stage][index + k];
1034 break;
1035 case ZINK_DESCRIPTOR_TYPE_UBO:
1036 if (!index)
1037 continue;
1038 FALLTHROUGH;
1039 default:
1040 pdd_cached(pg)->refs[type][ref_idx].res = &ctx->di.descriptor_res[type][stage][index + k];
1041 break;
1042 }
1043 assert(ref_idx < pdd_cached(pg)->num_refs[type]);
1044 ref_idx++;
1045 }
1046 }
1047 }
1048 }
1049
1050 bool
1051 zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
1052 {
1053 struct zink_screen *screen = zink_screen(ctx->base.screen);
1054
1055 pg->dd = (void*)rzalloc(pg, struct zink_program_descriptor_data_cached);
1056 if (!pg->dd)
1057 return false;
1058
1059 if (!zink_descriptor_program_init_lazy(ctx, pg))
1060 return false;
1061
1062 /* no descriptors */
1063 if (!pg->dd)
1064 return true;
1065
1066 for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1067 if (!pg->dd->layout_key[i])
1068 continue;
1069
1070 unsigned idx = zink_descriptor_type_to_size_idx(i);
1071 VkDescriptorPoolSize *size = &pg->dd->sizes[idx];
1072 /* this is a sampler/image set with no images only texels */
1073 if (!size->descriptorCount)
1074 size++;
1075 unsigned num_sizes = zink_descriptor_program_num_sizes(pg, i);
1076 struct zink_descriptor_pool *pool = descriptor_pool_get(ctx, i, pg->dd->layout_key[i], size, num_sizes);
1077 if (!pool)
1078 return false;
1079 zink_descriptor_pool_reference(screen, &pdd_cached(pg)->pool[i], pool);
1080
1081 if (screen->info.have_KHR_descriptor_update_template &&
1082 screen->descriptor_mode != ZINK_DESCRIPTOR_MODE_NOTEMPLATES)
1083 create_descriptor_ref_template(ctx, pg, i);
1084 }
1085
1086 return true;
1087 }
1088
1089 void
1090 zink_descriptor_program_deinit(struct zink_screen *screen, struct zink_program *pg)
1091 {
1092 if (!pg->dd)
1093 return;
1094 for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
1095 zink_descriptor_pool_reference(screen, &pdd_cached(pg)->pool[i], NULL);
1096
1097 zink_descriptor_program_deinit_lazy(screen, pg);
1098 }
1099
1100 static void
1101 zink_descriptor_pool_deinit(struct zink_context *ctx)
1102 {
1103 struct zink_screen *screen = zink_screen(ctx->base.screen);
1104 for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1105 hash_table_foreach(ctx->dd->descriptor_pools[i], entry) {
1106 struct zink_descriptor_pool *pool = (void*)entry->data;
1107 zink_descriptor_pool_reference(screen, &pool, NULL);
1108 }
1109 _mesa_hash_table_destroy(ctx->dd->descriptor_pools[i], NULL);
1110 }
1111 }
1112
1113 static bool
1114 zink_descriptor_pool_init(struct zink_context *ctx)
1115 {
1116 for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
1117 ctx->dd->descriptor_pools[i] = _mesa_hash_table_create(ctx, hash_descriptor_pool, equals_descriptor_pool);
1118 if (!ctx->dd->descriptor_pools[i])
1119 return false;
1120 }
1121 struct zink_screen *screen = zink_screen(ctx->base.screen);
1122 VkDescriptorPoolSize sizes[2];
1123 sizes[0].type = screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1124 sizes[0].descriptorCount = ZINK_SHADER_COUNT * ZINK_DEFAULT_MAX_DESCS;
1125 sizes[1].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
1126 sizes[1].descriptorCount = ZINK_DEFAULT_MAX_DESCS;
1127 ctx->dd->push_pool[0] = descriptor_pool_get(ctx, 0, ctx->dd->push_layout_keys[0], sizes, ctx->dd->has_fbfetch ? 2 : 1);
1128 sizes[0].descriptorCount = ZINK_DEFAULT_MAX_DESCS;
1129 ctx->dd->push_pool[1] = descriptor_pool_get(ctx, 0, ctx->dd->push_layout_keys[1], sizes, 1);
1130 return ctx->dd->push_pool[0] && ctx->dd->push_pool[1];
1131 }
1132
1133
1134 static void
1135 desc_set_res_add(struct zink_descriptor_set *zds, struct zink_resource *res, unsigned int i, bool cache_hit)
1136 {
1137 /* if we got a cache hit, we have to verify that the cached set is still valid;
1138 * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1139 * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1140 * whenever a resource is destroyed
1141 */
1142 assert(!cache_hit || zds->res_objs[i] == (res ? res->obj : NULL));
1143 if (!cache_hit)
1144 zink_resource_desc_set_add(res, zds, i);
1145 }
1146
1147 static void
1148 desc_set_sampler_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_descriptor_surface *dsurf,
1149 struct zink_sampler_state *state, unsigned int i, bool cache_hit)
1150 {
1151 /* if we got a cache hit, we have to verify that the cached set is still valid;
1152 * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1153 * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1154 * whenever a resource is destroyed
1155 */
1156 #ifndef NDEBUG
1157 uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1158 uint32_t new_hash = get_descriptor_surface_hash(ctx, dsurf);
1159 #endif
1160 assert(!cache_hit || cur_hash == new_hash);
1161 assert(!cache_hit || zds->sampler_states[i] == state);
1162 if (!cache_hit) {
1163 zink_descriptor_surface_desc_set_add(dsurf, zds, i);
1164 zink_sampler_state_desc_set_add(state, zds, i);
1165 }
1166 }
1167
1168 static void
1169 desc_set_image_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_image_view *image_view,
1170 unsigned int i, bool is_buffer, bool cache_hit)
1171 {
1172 /* if we got a cache hit, we have to verify that the cached set is still valid;
1173 * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1174 * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1175 * whenever a resource is destroyed
1176 */
1177 #ifndef NDEBUG
1178 uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1179 uint32_t new_hash = zink_get_image_view_hash(ctx, image_view, is_buffer);
1180 #endif
1181 assert(!cache_hit || cur_hash == new_hash);
1182 if (!cache_hit)
1183 zink_image_view_desc_set_add(image_view, zds, i, is_buffer);
1184 }
1185
1186 static void
1187 desc_set_descriptor_surface_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_descriptor_surface *dsurf,
1188 unsigned int i, bool cache_hit)
1189 {
1190 /* if we got a cache hit, we have to verify that the cached set is still valid;
1191 * we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
1192 * hash table on every resource with the associated descriptor sets that then needs to be iterated through
1193 * whenever a resource is destroyed
1194 */
1195 #ifndef NDEBUG
1196 uint32_t cur_hash = get_descriptor_surface_hash(ctx, &zds->surfaces[i]);
1197 uint32_t new_hash = get_descriptor_surface_hash(ctx, dsurf);
1198 #endif
1199 assert(!cache_hit || cur_hash == new_hash);
1200 if (!cache_hit)
1201 zink_descriptor_surface_desc_set_add(dsurf, zds, i);
1202 }
1203
1204 static unsigned
1205 init_write_descriptor(struct zink_shader *shader, VkDescriptorSet desc_set, enum zink_descriptor_type type, int idx, VkWriteDescriptorSet *wd, unsigned num_wds)
1206 {
1207 wd->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1208 wd->pNext = NULL;
1209 wd->dstBinding = shader ? shader->bindings[type][idx].binding : idx;
1210 wd->dstArrayElement = 0;
1211 wd->descriptorCount = shader ? shader->bindings[type][idx].size : 1;
1212 wd->descriptorType = shader ? shader->bindings[type][idx].type :
1213 idx == ZINK_FBFETCH_BINDING ? VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1214 wd->dstSet = desc_set;
1215 return num_wds + 1;
1216 }
1217
1218 static unsigned
1219 update_push_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
1220 VkDescriptorSet desc_set,
1221 bool is_compute, bool cache_hit, uint32_t *dynamic_offsets)
1222 {
1223 struct zink_screen *screen = zink_screen(ctx->base.screen);
1224 VkWriteDescriptorSet wds[ZINK_SHADER_COUNT + 1];
1225 VkDescriptorBufferInfo buffer_infos[ZINK_SHADER_COUNT];
1226 struct zink_shader **stages;
1227 bool fbfetch = false;
1228
1229 unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
1230 struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
1231 if (is_compute)
1232 stages = &ctx->curr_compute->shader;
1233 else
1234 stages = &ctx->gfx_stages[0];
1235
1236 for (int i = 0; i < num_stages; i++) {
1237 struct zink_shader *shader = stages[i];
1238 enum pipe_shader_type pstage = shader ? pipe_shader_type_from_mesa(shader->nir->info.stage) : i;
1239 VkDescriptorBufferInfo *info = &ctx->di.ubos[pstage][0];
1240 unsigned dynamic_idx = is_compute ? 0 : tgsi_processor_to_shader_stage(pstage);
1241
1242 /* Values are taken from pDynamicOffsets in an order such that all entries for set N come before set N+1;
1243 * within a set, entries are ordered by the binding numbers in the descriptor set layouts
1244 * - vkCmdBindDescriptorSets spec
1245 *
1246 * because of this, we have to populate the dynamic offsets by their shader stage to ensure they
1247 * match what the driver expects
1248 */
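/* e.g. for gfx, the binding number of each push UBO is
 * tgsi_processor_to_shader_stage(pstage) (see init_push_binding()), so
 * dynamic_idx orders the offsets by gl_shader_stage rather than by
 * gallium's pipe_shader_type.
 */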
1249 const bool used = (pg->dd->push_usage & BITFIELD_BIT(pstage)) == BITFIELD_BIT(pstage);
1250 dynamic_offsets[dynamic_idx] = used ? info->offset : 0;
1251 if (!cache_hit) {
1252 init_write_descriptor(NULL, desc_set, ZINK_DESCRIPTOR_TYPE_UBO, tgsi_processor_to_shader_stage(pstage), &wds[i], 0);
1253 if (used) {
1254 if (zds)
1255 desc_set_res_add(zds, ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][pstage][0], i, cache_hit);
1256 buffer_infos[i].buffer = info->buffer;
1257 buffer_infos[i].range = info->range;
1258 } else {
1259 if (zds)
1260 desc_set_res_add(zds, NULL, i, cache_hit);
1261 if (unlikely(!screen->info.rb2_feats.nullDescriptor))
1262 buffer_infos[i].buffer = zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
1263 else
1264 buffer_infos[i].buffer = VK_NULL_HANDLE;
1265 buffer_infos[i].range = VK_WHOLE_SIZE;
1266 }
1267 /* these are dynamic UBO descriptors, so we have to always set 0 as the descriptor offset */
1268 buffer_infos[i].offset = 0;
1269 wds[i].pBufferInfo = &buffer_infos[i];
1270 }
1271 }
1272 if (unlikely(!cache_hit && !is_compute && ctx->dd->has_fbfetch)) {
1273 init_write_descriptor(NULL, desc_set, 0, MESA_SHADER_STAGES, &wds[ZINK_SHADER_COUNT], 0);
1274 wds[ZINK_SHADER_COUNT].pImageInfo = &ctx->di.fbfetch;
1275 fbfetch = true;
1276 }
1277
1278 if (!cache_hit)
1279 VKSCR(UpdateDescriptorSets)(screen->dev, num_stages + !!fbfetch, wds, 0, NULL);
1280 return num_stages;
1281 }
1282
1283 static void
1284 set_descriptor_set_refs(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_program *pg, bool cache_hit)
1285 {
1286 enum zink_descriptor_type type = zds->pool->type;
1287 for (unsigned i = 0; i < pdd_cached(pg)->num_refs[type]; i++) {
1288 switch (type) {
1289 case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1290 desc_set_sampler_add(ctx, zds, pdd_cached(pg)->refs[type][i].sampler.dsurf,
1291 *pdd_cached(pg)->refs[type][i].sampler.sampler_state, i, cache_hit);
1292 break;
1293 case ZINK_DESCRIPTOR_TYPE_IMAGE:
1294 desc_set_descriptor_surface_add(ctx, zds, pdd_cached(pg)->refs[type][i].dsurf, i, cache_hit);
1295 break;
1296 default:
1297 desc_set_res_add(zds, *pdd_cached(pg)->refs[type][i].res, i, cache_hit);
1298 break;
1299 }
1300 }
1301 }
1302
1303 static void
1304 update_descriptors_internal(struct zink_context *ctx, enum zink_descriptor_type type, struct zink_descriptor_set *zds, struct zink_program *pg, bool cache_hit)
1305 {
1306 struct zink_screen *screen = zink_screen(ctx->base.screen);
1307 struct zink_shader **stages;
1308
1309 unsigned num_stages = pg->is_compute ? 1 : ZINK_SHADER_COUNT;
1310 if (pg->is_compute)
1311 stages = &ctx->curr_compute->shader;
1312 else
1313 stages = &ctx->gfx_stages[0];
1314
1315 if (cache_hit || !zds)
1316 return;
1317
1318 if (screen->info.have_KHR_descriptor_update_template &&
1319 screen->descriptor_mode != ZINK_DESCRIPTOR_MODE_NOTEMPLATES) {
1320 set_descriptor_set_refs(ctx, zds, pg, cache_hit);
1321 zink_descriptor_set_update_lazy(ctx, pg, type, zds->desc_set);
1322 return;
1323 }
1324
1325 unsigned num_resources = 0;
1326 ASSERTED unsigned num_bindings = zds->pool->num_resources;
1327 VkWriteDescriptorSet wds[ZINK_MAX_DESCRIPTORS_PER_TYPE];
1328 unsigned num_wds = 0;
1329
1330 for (int i = 0; i < num_stages; i++) {
1331 struct zink_shader *shader = stages[i];
1332 if (!shader)
1333 continue;
1334 enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
1335 for (int j = 0; j < shader->num_bindings[type]; j++) {
1336 int index = shader->bindings[type][j].index;
1337 switch (type) {
1338 case ZINK_DESCRIPTOR_TYPE_UBO:
1339 if (!index)
1340 continue;
1341 FALLTHROUGH;
1342 case ZINK_DESCRIPTOR_TYPE_SSBO: {
1343 VkDescriptorBufferInfo *info;
1344 struct zink_resource *res = ctx->di.descriptor_res[type][stage][index];
1345 if (type == ZINK_DESCRIPTOR_TYPE_UBO)
1346 info = &ctx->di.ubos[stage][index];
1347 else
1348 info = &ctx->di.ssbos[stage][index];
1349 assert(num_resources < num_bindings);
1350 desc_set_res_add(zds, res, num_resources++, cache_hit);
1351 wds[num_wds].pBufferInfo = info;
1352 }
1353 break;
1354 case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
1355 case ZINK_DESCRIPTOR_TYPE_IMAGE: {
1356 VkDescriptorImageInfo *image_info;
1357 VkBufferView *buffer_info;
1358 if (type == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
1359 image_info = &ctx->di.textures[stage][index];
1360 buffer_info = &ctx->di.tbos[stage][index];
1361 } else {
1362 image_info = &ctx->di.images[stage][index];
1363 buffer_info = &ctx->di.texel_images[stage][index];
1364 }
1365 bool is_buffer = zink_shader_descriptor_is_buffer(shader, type, j);
1366 for (unsigned k = 0; k < shader->bindings[type][j].size; k++) {
1367 assert(num_resources < num_bindings);
1368 if (type == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
1369 struct zink_sampler_state *sampler = NULL;
1370 if (!is_buffer && image_info->imageView)
1371 sampler = ctx->sampler_states[stage][index + k];;
1372
1373 desc_set_sampler_add(ctx, zds, &ctx->di.sampler_surfaces[stage][index + k], sampler, num_resources++, cache_hit);
1374 } else {
1375 struct zink_image_view *image_view = &ctx->image_views[stage][index + k];
1376 desc_set_image_add(ctx, zds, image_view, num_resources++, is_buffer, cache_hit);
1377 }
1378 }
1379 if (is_buffer)
1380 wds[num_wds].pTexelBufferView = buffer_info;
1381 else
1382 wds[num_wds].pImageInfo = image_info;
1383 }
1384 break;
1385 default:
1386 unreachable("unknown descriptor type");
1387 }
1388 num_wds = init_write_descriptor(shader, zds->desc_set, type, j, &wds[num_wds], num_wds);
1389 }
1390 }
1391 if (num_wds)
1392 VKSCR(UpdateDescriptorSets)(screen->dev, num_wds, wds, 0, NULL);
1393 }
1394
static void
zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg);

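/* number of consecutive cache misses a descriptor set type may accumulate before
 * zink_descriptors_update() gives up on the cached pools for that type and switches
 * it over to the lazy (uncached) update path
 */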
#define MAX_CACHE_MISSES 50

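/* per-draw/dispatch entry point for the cached descriptor path.
 *
 * rough sketch of the set indices used by the binds below:
 *   set 0                          push set (dynamic UBOs) or the dummy set
 *   sets 1..ZINK_DESCRIPTOR_TYPES  one cached set per descriptor type (h + 1)
 *   set ZINK_DESCRIPTOR_BINDLESS   bindless resources, bound once and left bound
 */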
void
zink_descriptors_update(struct zink_context *ctx, bool is_compute)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;

   zink_context_update_descriptor_states(ctx, pg);
   bool cache_hit = false;
   VkDescriptorSet desc_set = VK_NULL_HANDLE;
   struct zink_descriptor_set *zds = NULL;

   struct zink_batch *batch = &ctx->batch;
   VkPipelineBindPoint bp = is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS;

   {
      uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
      unsigned dynamic_offset_idx = 0;

      /* push set is indexed in vulkan as 0 but isn't in the general pool array */
      ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] |= ctx->dd->pg[is_compute] != pg;
      if (pg->dd->push_usage) {
         if (pg->dd->fbfetch) {
            /* fbfetch is not cacheable: grab a lazy set because it's faster */
            desc_set = zink_descriptors_alloc_lazy_push(ctx);
         } else {
            zds = zink_descriptor_set_get(ctx, ZINK_DESCRIPTOR_TYPES, is_compute, &cache_hit);
            desc_set = zds ? zds->desc_set : VK_NULL_HANDLE;
         }
      } else {
         cache_hit = false;
      }
      ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] = false;
      if (!desc_set)
         desc_set = ctx->dd->dummy_set;

      if (pg->dd->push_usage) // push set
         dynamic_offset_idx = update_push_ubo_descriptors(ctx, zds, desc_set,
                                                          is_compute, cache_hit, dynamic_offsets);
      VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
                                   pg->layout, 0, 1, &desc_set,
                                   dynamic_offset_idx, dynamic_offsets);
   }

   {
      for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
         if (pdd_cached(pg)->cache_misses[h] < MAX_CACHE_MISSES) {
            ctx->dd->changed[is_compute][h] |= ctx->dd->pg[is_compute] != pg;
            if (pg->dsl[h + 1]) {
               /* null set has null pool */
               if (pdd_cached(pg)->pool[h]) {
                  zds = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit);
                  if (cache_hit) {
                     pdd_cached(pg)->cache_misses[h] = 0;
                  } else if (likely(zink_screen(ctx->base.screen)->descriptor_mode != ZINK_DESCRIPTOR_MODE_NOFALLBACK)) {
                     if (++pdd_cached(pg)->cache_misses[h] == MAX_CACHE_MISSES) {
                        const char *set_names[] = {
                           "UBO",
                           "TEXTURES",
                           "SSBO",
                           "IMAGES",
                        };
                        debug_printf("zink: descriptor cache exploded for prog %p set %s: getting lazy (not a bug, just letting you know)\n", pg, set_names[h]);
                     }
                  }
               } else
                  zds = NULL;
               /* reuse dummy set for bind */
               desc_set = zds ? zds->desc_set : ctx->dd->dummy_set;
               update_descriptors_internal(ctx, h, zds, pg, cache_hit);

               VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
                                            pg->layout, h + 1, 1, &desc_set,
                                            0, NULL);
            }
         } else {
            zink_descriptors_update_lazy_masked(ctx, is_compute, BITFIELD_BIT(h), 0);
         }
         ctx->dd->changed[is_compute][h] = false;
      }
   }
   ctx->dd->pg[is_compute] = pg;

   if (pg->dd->bindless && unlikely(!ctx->dd->bindless_bound)) {
      VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
                                   pg->layout, ZINK_DESCRIPTOR_BINDLESS, 1, &ctx->dd->bindless_set,
                                   0, NULL);
      ctx->dd->bindless_bound = true;
   }
}

void
zink_batch_descriptor_deinit(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!bs->dd)
      return;
   _mesa_set_destroy(bs->dd->desc_sets, NULL);
   zink_batch_descriptor_deinit_lazy(screen, bs);
}

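/* on batch state reset: drop this batch's usage from every descriptor set it referenced
 * and return the sets to their pools for reuse
 */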
void
zink_batch_descriptor_reset(struct zink_screen *screen, struct zink_batch_state *bs)
{
   set_foreach(bs->dd->desc_sets, entry) {
      struct zink_descriptor_set *zds = (void*)entry->key;
      zink_batch_usage_unset(&zds->batch_uses, bs);
      /* reset descriptor pools when no bs is using this program to avoid
       * having some inactive program hogging a billion descriptors
       */
      pipe_reference(&zds->reference, NULL);
      zink_descriptor_set_recycle(zds);
      _mesa_set_remove(bs->dd->desc_sets, entry);
   }
   zink_batch_descriptor_reset_lazy(screen, bs);
}

bool
zink_batch_descriptor_init(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!zink_batch_descriptor_init_lazy(screen, bs))
      return false;
   bs->dd->desc_sets = _mesa_pointer_set_create(bs);
   return !!bs->dd->desc_sets;
}

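/* hash the UBO bound to one (stage, slot): resource object pointer and range, plus the
 * buffer offset when this isn't a push (dynamic) descriptor
 */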
static uint32_t
calc_descriptor_state_hash_ubo(struct zink_context *ctx, enum pipe_shader_type shader, int idx, uint32_t hash, bool need_offset)
{
   struct zink_resource *res = ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][shader][idx];
   struct zink_resource_object *obj = res ? res->obj : NULL;
   hash = XXH32(&obj, sizeof(void*), hash);
   void *hash_data = &ctx->di.ubos[shader][idx].range;
   size_t data_size = sizeof(unsigned);
   hash = XXH32(hash_data, data_size, hash);
   if (need_offset)
      hash = XXH32(&ctx->di.ubos[shader][idx].offset, sizeof(unsigned), hash);
   return hash;
}

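/* hash the SSBO bound to one (stage, slot): resource object pointer, plus offset/size
 * when a buffer is actually bound
 */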
static uint32_t
calc_descriptor_state_hash_ssbo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   struct zink_resource *res = ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_SSBO][shader][idx];
   struct zink_resource_object *obj = res ? res->obj : NULL;
   hash = XXH32(&obj, sizeof(void*), hash);
   if (obj) {
      struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][idx];
      hash = XXH32(&ssbo->buffer_offset, sizeof(ssbo->buffer_offset), hash);
      hash = XXH32(&ssbo->buffer_size, sizeof(ssbo->buffer_size), hash);
   }
   return hash;
}

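/* hash every element of a sampler-view binding: the view (or texel buffer) hash, plus the
 * sampler state hash for non-buffer views
 */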
static uint32_t
calc_descriptor_state_hash_sampler(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW][i].size; k++) {
      struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][idx + k]);
      bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, i);
      ctx->di.sampler_surfaces[shader][idx + k].is_buffer = is_buffer;
      uint32_t val = zink_get_sampler_view_hash(ctx, sampler_view, is_buffer);
      hash = XXH32(&val, sizeof(uint32_t), hash);
      if (is_buffer)
         continue;

      struct zink_sampler_state *sampler_state = ctx->sampler_states[shader][idx + k];

      if (sampler_state)
         hash = XXH32(&sampler_state->hash, sizeof(uint32_t), hash);
   }
   return hash;
}

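/* hash every element of a storage-image binding: just the image/texel-buffer view hash */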
static uint32_t
calc_descriptor_state_hash_image(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_IMAGE][i].size; k++) {
      bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_IMAGE, i);
      uint32_t val = zink_get_image_view_hash(ctx, &ctx->image_views[shader][idx + k], is_buffer);
      ctx->di.image_surfaces[shader][idx + k].is_buffer = is_buffer;
      hash = XXH32(&val, sizeof(uint32_t), hash);
   }
   return hash;
}

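/* recompute the descriptor-state hash for one descriptor type in a single shader stage,
 * skipping bindings that live in the push set
 */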
static uint32_t
update_descriptor_stage_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
   struct zink_shader *zs = shader == PIPE_SHADER_COMPUTE ? ctx->compute_stage : ctx->gfx_stages[shader];

   uint32_t hash = 0;
   for (int i = 0; i < zs->num_bindings[type]; i++) {
      /* skip push set members */
      if (zs->bindings[type][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
         continue;

      int idx = zs->bindings[type][i].index;
      switch (type) {
      case ZINK_DESCRIPTOR_TYPE_UBO:
         hash = calc_descriptor_state_hash_ubo(ctx, shader, idx, hash, true);
         break;
      case ZINK_DESCRIPTOR_TYPE_SSBO:
         hash = calc_descriptor_state_hash_ssbo(ctx, zs, shader, i, idx, hash);
         break;
      case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
         hash = calc_descriptor_state_hash_sampler(ctx, zs, shader, i, idx, hash);
         break;
      case ZINK_DESCRIPTOR_TYPE_IMAGE:
         hash = calc_descriptor_state_hash_image(ctx, zs, shader, i, idx, hash);
         break;
      default:
         unreachable("unknown descriptor type");
      }
   }
   return hash;
}

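/* recompute the combined descriptor-state hash for one descriptor type: compute programs
 * hash the compute stage directly, gfx programs fold the per-stage hashes together and
 * only rehash stages whose cached per-stage state was invalidated
 */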
static void
update_descriptor_state(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute)
{
   /* we shouldn't be calling this if we don't have to */
   assert(!ctx->dd->descriptor_states[is_compute].valid[type]);
   bool has_any_usage = false;

   if (is_compute) {
      /* just update compute state */
      bool has_usage = zink_program_get_descriptor_usage(ctx, PIPE_SHADER_COMPUTE, type);
      if (has_usage)
         ctx->dd->descriptor_states[is_compute].state[type] = update_descriptor_stage_state(ctx, PIPE_SHADER_COMPUTE, type);
      else
         ctx->dd->descriptor_states[is_compute].state[type] = 0;
      has_any_usage = has_usage;
   } else {
      /* update all gfx states */
      bool first = true;
      for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
         bool has_usage = false;
         /* this is the incremental update for the shader stage */
         if (!ctx->dd->gfx_descriptor_states[i].valid[type]) {
            ctx->dd->gfx_descriptor_states[i].state[type] = 0;
            if (ctx->gfx_stages[i]) {
               has_usage = zink_program_get_descriptor_usage(ctx, i, type);
               if (has_usage)
                  ctx->dd->gfx_descriptor_states[i].state[type] = update_descriptor_stage_state(ctx, i, type);
               ctx->dd->gfx_descriptor_states[i].valid[type] = has_usage;
            }
         }
         if (ctx->dd->gfx_descriptor_states[i].valid[type]) {
            /* this is the overall state update for the descriptor set hash */
            if (first) {
               /* no need to double hash the first state */
               ctx->dd->descriptor_states[is_compute].state[type] = ctx->dd->gfx_descriptor_states[i].state[type];
               first = false;
            } else {
               ctx->dd->descriptor_states[is_compute].state[type] = XXH32(&ctx->dd->gfx_descriptor_states[i].state[type],
                                                                          sizeof(uint32_t),
                                                                          ctx->dd->descriptor_states[is_compute].state[type]);
            }
         }
         has_any_usage |= has_usage;
      }
   }
   ctx->dd->descriptor_states[is_compute].valid[type] = has_any_usage;
}

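/* refresh the descriptor-state hashes this program needs before set lookup: the push-set
 * (UBO slot 0) hash when its usage changed or was invalidated, then every cached set type
 * whose combined state is invalid and which hasn't fallen back to the lazy path
 */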
static void
zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg)
{
   if (pg->dd->push_usage && (!ctx->dd->push_valid[pg->is_compute] ||
                              pg->dd->push_usage != ctx->dd->last_push_usage[pg->is_compute])) {
      uint32_t hash = 0;
      if (pg->is_compute) {
         hash = calc_descriptor_state_hash_ubo(ctx, PIPE_SHADER_COMPUTE, 0, 0, false);
      } else {
         bool first = true;
         u_foreach_bit(stage, pg->dd->push_usage) {
            if (!ctx->dd->gfx_push_valid[stage]) {
               ctx->dd->gfx_push_state[stage] = calc_descriptor_state_hash_ubo(ctx, stage, 0, 0, false);
               ctx->dd->gfx_push_valid[stage] = true;
            }
            if (first)
               hash = ctx->dd->gfx_push_state[stage];
            else
               hash = XXH32(&ctx->dd->gfx_push_state[stage], sizeof(uint32_t), hash);
            first = false;
         }
      }
      ctx->dd->push_state[pg->is_compute] = hash;
      ctx->dd->push_valid[pg->is_compute] = true;
      ctx->dd->last_push_usage[pg->is_compute] = pg->dd->push_usage;
   }
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      if (pdd_cached(pg)->pool[i] && pdd_cached(pg)->cache_misses[i] < MAX_CACHE_MISSES &&
          !ctx->dd->descriptor_states[pg->is_compute].valid[i])
         update_descriptor_state(ctx, i, pg->is_compute);
   }
}

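/* mark descriptor state dirty for a range of bindings: UBO slot 0 invalidates the push-set
 * state, anything else invalidates the per-stage and combined hashes for the given type
 */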
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned start, unsigned count)
{
   zink_context_invalidate_descriptor_state_lazy(ctx, shader, type, start, count);
   if (type == ZINK_DESCRIPTOR_TYPE_UBO && !start) {
      /* ubo 0 is the push set */
      ctx->dd->push_state[shader == PIPE_SHADER_COMPUTE] = 0;
      ctx->dd->push_valid[shader == PIPE_SHADER_COMPUTE] = false;
      if (shader != PIPE_SHADER_COMPUTE) {
         ctx->dd->gfx_push_state[shader] = 0;
         ctx->dd->gfx_push_valid[shader] = false;
      }
      ctx->dd->changed[shader == PIPE_SHADER_COMPUTE][ZINK_DESCRIPTOR_TYPES] = true;
      return;
   }
   if (shader != PIPE_SHADER_COMPUTE) {
      ctx->dd->gfx_descriptor_states[shader].valid[type] = false;
      ctx->dd->gfx_descriptor_states[shader].state[type] = 0;
   }
   ctx->dd->descriptor_states[shader == PIPE_SHADER_COMPUTE].valid[type] = false;
   ctx->dd->descriptor_states[shader == PIPE_SHADER_COMPUTE].state[type] = 0;
   ctx->dd->changed[shader == PIPE_SHADER_COMPUTE][type] = true;
}

bool
zink_descriptors_init(struct zink_context *ctx)
{
   zink_descriptors_init_lazy(ctx);
   if (!ctx->dd)
      return false;
   return zink_descriptor_pool_init(ctx);
}

void
zink_descriptors_deinit(struct zink_context *ctx)
{
   zink_descriptor_pool_deinit(ctx);
   zink_descriptors_deinit_lazy(ctx);
}

bool
zink_descriptor_layouts_init(struct zink_context *ctx)
{
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
      if (!_mesa_hash_table_init(&ctx->desc_set_layouts[i], ctx, hash_descriptor_layout, equals_descriptor_layout))
         return false;
   return true;
}

void
zink_descriptor_layouts_deinit(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      hash_table_foreach(&ctx->desc_set_layouts[i], he) {
         struct zink_descriptor_layout *layout = he->data;
         VKSCR(DestroyDescriptorSetLayout)(screen->dev, layout->layout, NULL);
         if (layout->desc_template)
            VKSCR(DestroyDescriptorUpdateTemplate)(screen->dev, layout->desc_template, NULL);
         ralloc_free(layout);
         _mesa_hash_table_remove(&ctx->desc_set_layouts[i], he);
      }
   }
}


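/* switch the gfx push-set layout to its fbfetch variant the first time fbfetch is needed:
 * the existing layout is destroyed and recreated with the extra fbfetch descriptor, and the
 * cached pools are reinitialized for non-lazy descriptor modes
 */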
void
zink_descriptor_util_init_fbfetch(struct zink_context *ctx)
{
   if (ctx->dd->has_fbfetch)
      return;

   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->push_dsl[0]->layout, NULL);
   ralloc_free(ctx->dd->push_dsl[0]);
   ralloc_free(ctx->dd->push_layout_keys[0]);
   ctx->dd->push_dsl[0] = create_gfx_layout(ctx, &ctx->dd->push_layout_keys[0], true);
   ctx->dd->has_fbfetch = true;
   if (screen->descriptor_mode != ZINK_DESCRIPTOR_MODE_LAZY)
      zink_descriptor_pool_init(ctx);
}

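/* bindless set binding order: 0 = combined image/samplers, 1 = uniform texel buffers,
 * 2 = storage images, 3 = storage texel buffers
 */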
ALWAYS_INLINE static VkDescriptorType
type_from_bindless_index(unsigned idx)
{
   switch (idx) {
   case 0: return VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
   case 1: return VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
   case 2: return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
   case 3: return VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
   default:
      unreachable("unknown index");
   }
}

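/* one-time creation of the bindless descriptor set: a single UPDATE_AFTER_BIND set with
 * four large, partially-bound arrays (see type_from_bindless_index) visible to all stages
 */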
void
zink_descriptors_init_bindless(struct zink_context *ctx)
{
   if (ctx->dd->bindless_set)
      return;

   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkDescriptorSetLayoutBinding bindings[4];
   const unsigned num_bindings = 4;
   VkDescriptorSetLayoutCreateInfo dcslci = {0};
   dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
   dcslci.pNext = NULL;
   VkDescriptorSetLayoutBindingFlagsCreateInfo fci = {0};
   VkDescriptorBindingFlags flags[4];
   dcslci.pNext = &fci;
   dcslci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
   fci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
   fci.bindingCount = num_bindings;
   fci.pBindingFlags = flags;
   for (unsigned i = 0; i < num_bindings; i++) {
      flags[i] = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT | VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT;
   }
   for (unsigned i = 0; i < num_bindings; i++) {
      bindings[i].binding = i;
      bindings[i].descriptorType = type_from_bindless_index(i);
      bindings[i].descriptorCount = ZINK_MAX_BINDLESS_HANDLES;
      bindings[i].stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_COMPUTE_BIT;
      bindings[i].pImmutableSamplers = NULL;
   }

   dcslci.bindingCount = num_bindings;
   dcslci.pBindings = bindings;
   if (VKSCR(CreateDescriptorSetLayout)(screen->dev, &dcslci, 0, &ctx->dd->bindless_layout) != VK_SUCCESS) {
      debug_printf("vkCreateDescriptorSetLayout failed\n");
      return;
   }

   VkDescriptorPoolCreateInfo dpci = {0};
   VkDescriptorPoolSize sizes[4];
   for (unsigned i = 0; i < 4; i++) {
      sizes[i].type = type_from_bindless_index(i);
      sizes[i].descriptorCount = ZINK_MAX_BINDLESS_HANDLES;
   }
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = 4;
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT;
   dpci.maxSets = 1;
   if (VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &ctx->dd->bindless_pool) != VK_SUCCESS) {
      debug_printf("vkCreateDescriptorPool failed\n");
      return;
   }

   zink_descriptor_util_alloc_sets(screen, ctx->dd->bindless_layout, ctx->dd->bindless_pool, &ctx->dd->bindless_set, 1);
}

void
zink_descriptors_deinit_bindless(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   if (ctx->dd->bindless_layout)
      VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->bindless_layout, NULL);
   if (ctx->dd->bindless_pool)
      VKSCR(DestroyDescriptorPool)(screen->dev, ctx->dd->bindless_pool, NULL);
}

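/* flush pending bindless writes: for every handle queued in di.bindless[i].updates, write
 * the matching image or texel-buffer descriptor into the bindless set (buffer handles go to
 * the odd bindings, image handles to the even ones)
 */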
void
zink_descriptors_update_bindless(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   for (unsigned i = 0; i < 2; i++) {
      if (!ctx->di.bindless_dirty[i])
         continue;
      while (util_dynarray_contains(&ctx->di.bindless[i].updates, uint32_t)) {
         uint32_t handle = util_dynarray_pop(&ctx->di.bindless[i].updates, uint32_t);
         bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
         VkWriteDescriptorSet wd;
         wd.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wd.pNext = NULL;
         wd.dstSet = ctx->dd->bindless_set;
         wd.dstBinding = is_buffer ? i * 2 + 1 : i * 2;
         wd.dstArrayElement = is_buffer ? handle - ZINK_MAX_BINDLESS_HANDLES : handle;
         wd.descriptorCount = 1;
         wd.descriptorType = type_from_bindless_index(wd.dstBinding);
         if (is_buffer)
            wd.pTexelBufferView = &ctx->di.bindless[i].buffer_infos[wd.dstArrayElement];
         else
            wd.pImageInfo = &ctx->di.bindless[i].img_infos[handle];
         VKSCR(UpdateDescriptorSets)(screen->dev, 1, &wd, 0, NULL);
      }
   }
   ctx->di.any_bindless_dirty = 0;
}
