/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_program.h"

#include "zink_compiler.h"
#include "zink_context.h"
#include "zink_render_pass.h"
#include "zink_screen.h"

#include "util/hash_table.h"
#include "util/set.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "tgsi/tgsi_from_mesa.h"

struct pipeline_cache_entry {
   struct zink_gfx_pipeline_state state;
   VkPipeline pipeline;
};

void
debug_describe_zink_gfx_program(char *buf, const struct zink_gfx_program *ptr)
{
   sprintf(buf, "zink_gfx_program");
}

static void
debug_describe_zink_shader_module(char *buf, const struct zink_shader_module *ptr)
{
   sprintf(buf, "zink_shader_module");
}

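/* Gather the descriptor bindings declared by every active shader stage into a
 * single VkDescriptorSetLayout, reporting the total binding count back through
 * num_descriptors.
 */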
static VkDescriptorSetLayout
create_desc_set_layout(VkDevice dev,
                       struct zink_shader *stages[ZINK_SHADER_COUNT],
                       unsigned *num_descriptors)
{
   VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
   int num_bindings = 0;

   for (int i = 0; i < ZINK_SHADER_COUNT; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;

      VkShaderStageFlagBits stage_flags = zink_shader_stage(i);
      for (int j = 0; j < shader->num_bindings; j++) {
         assert(num_bindings < ARRAY_SIZE(bindings));
         bindings[num_bindings].binding = shader->bindings[j].binding;
         bindings[num_bindings].descriptorType = shader->bindings[j].type;
         bindings[num_bindings].descriptorCount = 1;
         bindings[num_bindings].stageFlags = stage_flags;
         bindings[num_bindings].pImmutableSamplers = NULL;
         ++num_bindings;
      }
   }

   VkDescriptorSetLayoutCreateInfo dcslci = {};
   dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
   dcslci.pNext = NULL;
   dcslci.flags = 0;
   dcslci.bindingCount = num_bindings;
   dcslci.pBindings = bindings;

   VkDescriptorSetLayout dsl;
   if (vkCreateDescriptorSetLayout(dev, &dcslci, 0, &dsl) != VK_SUCCESS) {
      debug_printf("vkCreateDescriptorSetLayout failed\n");
      return VK_NULL_HANDLE;
   }

   *num_descriptors = num_bindings;
   return dsl;
}

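/* Wrap the program's single descriptor set layout in a VkPipelineLayout. */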
static VkPipelineLayout
create_pipeline_layout(VkDevice dev, VkDescriptorSetLayout dsl)
{
   assert(dsl != VK_NULL_HANDLE);

   VkPipelineLayoutCreateInfo plci = {};
   plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;

   plci.pSetLayouts = &dsl;
   plci.setLayoutCount = 1;

   VkPipelineLayout layout;
   if (vkCreatePipelineLayout(dev, &plci, NULL, &layout) != VK_SUCCESS) {
      debug_printf("vkCreatePipelineLayout failed!\n");
      return VK_NULL_HANDLE;
   }

   return layout;
}

static void
zink_destroy_shader_module(struct zink_screen *screen, struct zink_shader_module *zm)
{
   vkDestroyShaderModule(screen->dev, zm->shader, NULL);
   free(zm);
}

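/* Point *dst at src, adjusting reference counts and destroying the old module
 * once its count drops to zero.
 */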
static inline void
zink_shader_module_reference(struct zink_screen *screen,
                             struct zink_shader_module **dst,
                             struct zink_shader_module *src)
{
   struct zink_shader_module *old_dst = dst ? *dst : NULL;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL, &src->reference,
                                (debug_reference_descriptor)debug_describe_zink_shader_module))
      zink_destroy_shader_module(screen, old_dst);
   if (dst) *dst = src;
}

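/* Compile a fresh zink_shader_module for every stage flagged in
 * ctx->dirty_shader_stages; unchanged stages reuse the modules of the
 * currently bound program so their slot assignments keep matching up.
 */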
static void
update_shader_modules(struct zink_context *ctx, struct zink_shader *stages[ZINK_SHADER_COUNT], struct zink_gfx_program *prog)
{
   struct zink_shader *dirty[ZINK_SHADER_COUNT] = {NULL};

   /* we need to map pipe_shader_type -> gl_shader_stage so we can ensure that we're compiling
    * the shaders in pipeline order and have builtin input/output locations match up after being compacted
    */
   unsigned dirty_shader_stages = ctx->dirty_shader_stages;
   while (dirty_shader_stages) {
      unsigned type = u_bit_scan(&dirty_shader_stages);
      dirty[tgsi_processor_to_shader_stage(type)] = stages[type];
   }

   for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
      enum pipe_shader_type type = pipe_shader_type_from_mesa(i);
      if (dirty[i]) {
         prog->modules[type] = CALLOC_STRUCT(zink_shader_module);
         assert(prog->modules[type]);
         pipe_reference_init(&prog->modules[type]->reference, 1);
         dirty[i]->has_geometry_shader = dirty[MESA_SHADER_GEOMETRY] || stages[PIPE_SHADER_GEOMETRY];
         prog->modules[type]->shader = zink_shader_compile(zink_screen(ctx->base.screen), dirty[i],
                                                           prog->shader_slot_map, &prog->shader_slots_reserved);
      } else if (stages[type]) /* reuse existing shader module */
         zink_shader_module_reference(zink_screen(ctx->base.screen), &prog->modules[type], ctx->curr_program->modules[type]);
      prog->shaders[type] = stages[type];
   }
   ctx->dirty_shader_stages = 0;
}

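/* Pipeline states are hashed and compared over everything up to the 'hash'
 * member, so the cached hash itself (and anything after it) never affects
 * lookups.
 */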
static uint32_t
hash_gfx_pipeline_state(const void *key)
{
   return _mesa_hash_data(key, offsetof(struct zink_gfx_pipeline_state, hash));
}

static bool
equals_gfx_pipeline_state(const void *a, const void *b)
{
   return memcmp(a, b, offsetof(struct zink_gfx_pipeline_state, hash)) == 0;
}

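/* Decide whether the new program starts from a fresh slot map or inherits
 * ctx->curr_program's map (when some of its shaders are being reused), so
 * reused modules keep their input/output locations.
 */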
static void
init_slot_map(struct zink_context *ctx, struct zink_gfx_program *prog)
{
   unsigned existing_shaders = 0;

   /* if there's a case where we'll be reusing any shaders, we need to reuse the slot map too */
   if (ctx->curr_program) {
      for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
          if (ctx->curr_program->shaders[i])
             existing_shaders |= 1 << i;
      }
   }
   if (ctx->dirty_shader_stages == existing_shaders || !existing_shaders)
      /* all shaders are being recompiled: new slot map */
      memset(prog->shader_slot_map, -1, sizeof(prog->shader_slot_map));
   else {
      /* at least some shaders are being reused: use existing slot map so locations match up */
      memcpy(prog->shader_slot_map, ctx->curr_program->shader_slot_map, sizeof(prog->shader_slot_map));
      prog->shader_slots_reserved = ctx->curr_program->shader_slots_reserved;
   }
}

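/* Build a complete graphics program for the given stages: set up the slot map,
 * compile or reuse shader modules, allocate the per-topology pipeline caches,
 * and create the descriptor-set and pipeline layouts.
 */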
struct zink_gfx_program *
zink_create_gfx_program(struct zink_context *ctx,
                        struct zink_shader *stages[ZINK_SHADER_COUNT])
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_gfx_program *prog = CALLOC_STRUCT(zink_gfx_program);
   if (!prog)
      goto fail;

   pipe_reference_init(&prog->reference, 1);

   init_slot_map(ctx, prog);

   update_shader_modules(ctx, stages, prog);

   for (int i = 0; i < ARRAY_SIZE(prog->pipelines); ++i) {
      prog->pipelines[i] = _mesa_hash_table_create(NULL,
                                                   hash_gfx_pipeline_state,
                                                   equals_gfx_pipeline_state);
      if (!prog->pipelines[i])
         goto fail;
   }

   for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
      if (prog->modules[i]) {
         _mesa_set_add(stages[i]->programs, prog);
         zink_gfx_program_reference(screen, NULL, prog);
      }
   }

   prog->dsl = create_desc_set_layout(screen->dev, stages,
                                      &prog->num_descriptors);
   if (!prog->dsl)
      goto fail;

   prog->layout = create_pipeline_layout(screen->dev, prog->dsl);
   if (!prog->layout)
      goto fail;

   prog->render_passes = _mesa_set_create(NULL, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   if (!prog->render_passes)
      goto fail;

   return prog;

fail:
   if (prog)
      zink_destroy_gfx_program(screen, prog);
   return NULL;
}

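/* Detach a shader from the program and drop the program from the shader's set
 * of users.
 */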
static void
gfx_program_remove_shader(struct zink_gfx_program *prog, struct zink_shader *shader)
{
   enum pipe_shader_type p_stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

   assert(prog->shaders[p_stage] == shader);
   prog->shaders[p_stage] = NULL;
   _mesa_set_remove_key(shader->programs, prog);
}

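/* Tear down everything the program owns: Vulkan layouts, shader module
 * references, the render passes it holds references to, and every cached
 * pipeline.
 */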
void
zink_destroy_gfx_program(struct zink_screen *screen,
                         struct zink_gfx_program *prog)
{
   if (prog->layout)
      vkDestroyPipelineLayout(screen->dev, prog->layout, NULL);

   if (prog->dsl)
      vkDestroyDescriptorSetLayout(screen->dev, prog->dsl, NULL);

   for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
      if (prog->shaders[i])
         gfx_program_remove_shader(prog, prog->shaders[i]);
      if (prog->modules[i])
         zink_shader_module_reference(screen, &prog->modules[i], NULL);
   }

   /* unref all used render-passes */
   if (prog->render_passes) {
      set_foreach(prog->render_passes, entry) {
         struct zink_render_pass *pres = (struct zink_render_pass *)entry->key;
         zink_render_pass_reference(screen, &pres, NULL);
      }
      _mesa_set_destroy(prog->render_passes, NULL);
   }

   for (int i = 0; i < ARRAY_SIZE(prog->pipelines); ++i) {
      hash_table_foreach(prog->pipelines[i], entry) {
         struct pipeline_cache_entry *pc_entry = entry->data;

         vkDestroyPipeline(screen->dev, pc_entry->pipeline, NULL);
         free(pc_entry);
      }
      _mesa_hash_table_destroy(prog->pipelines[i], NULL);
   }

   FREE(prog);
}

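/* Translate a gallium primitive type into the corresponding Vulkan topology. */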
static VkPrimitiveTopology
primitive_topology(enum pipe_prim_type mode)
{
   switch (mode) {
   case PIPE_PRIM_POINTS:
      return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;

   case PIPE_PRIM_LINES:
      return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;

   case PIPE_PRIM_LINE_STRIP:
      return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;

   case PIPE_PRIM_TRIANGLES:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

   case PIPE_PRIM_TRIANGLE_STRIP:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;

   case PIPE_PRIM_TRIANGLE_FAN:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;

   case PIPE_PRIM_LINE_STRIP_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY;

   case PIPE_PRIM_LINES_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY;

   case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY;

   case PIPE_PRIM_TRIANGLES_ADJACENCY:
      return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY;

   default:
      unreachable("unexpected enum pipe_prim_type");
   }
}

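/* Take a reference on a render pass the first time a pipeline for this program
 * is created against it, keeping the render pass alive for the program's
 * lifetime.
 */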
static void
reference_render_pass(struct zink_screen *screen,
                      struct zink_gfx_program *prog,
                      struct zink_render_pass *render_pass)
{
   struct set_entry *entry = _mesa_set_search(prog->render_passes,
                                              render_pass);
   if (!entry) {
      entry = _mesa_set_add(prog->render_passes, render_pass);
      pipe_reference(NULL, &render_pass->reference);
   }
}

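/* Look up a pipeline matching 'state' in the program's per-topology cache,
 * creating and caching it on a miss.
 */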
VkPipeline
zink_get_gfx_pipeline(struct zink_screen *screen,
                      struct zink_gfx_program *prog,
                      struct zink_gfx_pipeline_state *state,
                      enum pipe_prim_type mode)
{
   VkPrimitiveTopology vkmode = primitive_topology(mode);
   assert(vkmode <= ARRAY_SIZE(prog->pipelines));

   struct hash_entry *entry = NULL;

   if (!state->hash) {
      state->hash = hash_gfx_pipeline_state(state);
      /* make sure the hash is not zero, as we take it as invalid.
       * TODO: rework this using a separate dirty-bit */
      assert(state->hash != 0);
   }
   entry = _mesa_hash_table_search_pre_hashed(prog->pipelines[vkmode], state->hash, state);

   if (!entry) {
      VkPipeline pipeline = zink_create_gfx_pipeline(screen, prog,
                                                     state, vkmode);
      if (pipeline == VK_NULL_HANDLE)
         return VK_NULL_HANDLE;

      struct pipeline_cache_entry *pc_entry = CALLOC_STRUCT(pipeline_cache_entry);
      if (!pc_entry)
         return VK_NULL_HANDLE;

      memcpy(&pc_entry->state, state, sizeof(*state));
      pc_entry->pipeline = pipeline;

      assert(state->hash);
      entry = _mesa_hash_table_insert_pre_hashed(prog->pipelines[vkmode], state->hash, state, pc_entry);
      assert(entry);

      reference_render_pass(screen, prog, state->render_pass);
   }

   return ((struct pipeline_cache_entry *)(entry->data))->pipeline;
}

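/* The create_*_state hooks below accept either NIR or TGSI from gallium; TGSI
 * is translated to NIR before being handed to zink_shader_create.
 */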
static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_shader_create(zink_screen(pctx->screen), nir, &shader->stream_output);
}

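/* Record the newly bound shader for a graphics stage and mark that stage dirty
 * so its module is rebuilt when the program is next updated.
 */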
static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty_shader_stages |= 1 << stage;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_shader_create(zink_screen(pctx->screen), nir, NULL);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void *
zink_create_gs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_shader_create(zink_screen(pctx->screen), nir, &shader->stream_output);
}

static void
zink_bind_gs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_GEOMETRY, cso);
}

static void
zink_delete_shader_state(struct pipe_context *pctx, void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}

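/* Hook the vertex, fragment, and geometry shader-state entry points into the
 * gallium context.
 */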
void
zink_program_init(struct zink_context *ctx)
{
   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_shader_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_shader_state;

   ctx->base.create_gs_state = zink_create_gs_state;
   ctx->base.bind_gs_state = zink_bind_gs_state;
   ctx->base.delete_gs_state = zink_delete_shader_state;
}