/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include "helpers.h"

#include "common/amd_family.h"
#include "vulkan/vk_format.h"

#include <llvm-c/Target.h>

#include <mutex>
#include <sstream>
#include <stdio.h>

using namespace aco;

extern "C" {
PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName);
}

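/* Global state shared by the tests: the shader config and info passed to
 * init_program(), the program currently under construction, a Builder pointing
 * at its active block, and the Temps defined by p_startpgm in setup_cs(). */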
ac_shader_config config;
aco_shader_info info;
std::unique_ptr<Program> program;
Builder bld(NULL);
Temp inputs[16];

static VkInstance instance_cache[CHIP_LAST] = {VK_NULL_HANDLE};
static VkDevice device_cache[CHIP_LAST] = {VK_NULL_HANDLE};
static std::mutex create_device_mutex;

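/* X-macro listing the Vulkan entry points the tests need. ITEM() is expanded
 * once right below to declare a PFN_vk* pointer per entry point and again in
 * get_vk_device() to resolve each pointer via vk_icdGetInstanceProcAddr(). */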
#define FUNCTION_LIST                                                                              \
   ITEM(CreateInstance)                                                                            \
   ITEM(DestroyInstance)                                                                           \
   ITEM(EnumeratePhysicalDevices)                                                                  \
   ITEM(GetPhysicalDeviceProperties2)                                                              \
   ITEM(CreateDevice)                                                                              \
   ITEM(DestroyDevice)                                                                             \
   ITEM(CreateShaderModule)                                                                        \
   ITEM(DestroyShaderModule)                                                                       \
   ITEM(CreateGraphicsPipelines)                                                                   \
   ITEM(CreateComputePipelines)                                                                    \
   ITEM(DestroyPipeline)                                                                           \
   ITEM(CreateDescriptorSetLayout)                                                                 \
   ITEM(DestroyDescriptorSetLayout)                                                                \
   ITEM(CreatePipelineLayout)                                                                      \
   ITEM(DestroyPipelineLayout)                                                                     \
   ITEM(CreateRenderPass)                                                                          \
   ITEM(DestroyRenderPass)                                                                         \
   ITEM(GetPipelineExecutablePropertiesKHR)                                                        \
   ITEM(GetPipelineExecutableInternalRepresentationsKHR)

#define ITEM(n) PFN_vk##n n;
FUNCTION_LIST
#undef ITEM

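/* Creates a fresh Program for the given target, zeroes the shader config,
 * routes compiler debug messages to the test's output stream and leaves the
 * global Builder pointing at a newly created top-level block. */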
void
create_program(enum amd_gfx_level gfx_level, Stage stage, unsigned wave_size,
               enum radeon_family family)
{
   memset(&config, 0, sizeof(config));
   info.wave_size = wave_size;

   program.reset(new Program);
   aco::init_program(program.get(), stage, &info, gfx_level, family, false, &config);
   program->workgroup_size = UINT_MAX;
   calc_min_waves(program.get());

   program->debug.output = output;
   program->debug.shorten_messages = true;
   program->debug.func = nullptr;
   program->debug.private_data = nullptr;

   Block* block = program->create_and_insert_block();
   block->kind = block_kind_top_level;

   bld = Builder(program.get(), &program->blocks[0]);

   config.float_mode = program->blocks[0].fp_mode.val;
}

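/* Sets up a compute-shader Program and, if input_spec is non-NULL, emits a
 * p_startpgm whose definitions become the global inputs[]. input_spec is a
 * space-separated register list: 'v' or 's' picks VGPR/SGPR, the digit that
 * follows is the size in dwords, and an optional trailing 'b' makes the size
 * count bytes instead, e.g. "v1 s2 v2b".
 *
 * A typical test body looks roughly like this (a sketch assuming the default
 * arguments declared in helpers.h):
 *
 *    if (!setup_cs("v1 s1", GFX10))
 *       return;
 *    writeout(0, fneg(inputs[0]));
 *    finish_opt_test();
 */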
bool
setup_cs(const char* input_spec, enum amd_gfx_level gfx_level, enum radeon_family family,
         const char* subvariant, unsigned wave_size)
{
   if (!set_variant(gfx_level, subvariant))
      return false;

   memset(&info, 0, sizeof(info));
   create_program(gfx_level, compute_cs, wave_size, family);

   if (input_spec) {
      std::vector<RegClass> input_classes;
      while (input_spec[0]) {
         RegType type = input_spec[0] == 'v' ? RegType::vgpr : RegType::sgpr;
         unsigned size = input_spec[1] - '0';
         bool in_bytes = input_spec[2] == 'b';
         input_classes.push_back(RegClass::get(type, size * (in_bytes ? 1 : 4)));

         input_spec += 2 + in_bytes;
         while (input_spec[0] == ' ')
            input_spec++;
      }

      aco_ptr<Instruction> startpgm{create_instruction<Pseudo_instruction>(
         aco_opcode::p_startpgm, Format::PSEUDO, 0, input_classes.size())};
      for (unsigned i = 0; i < input_classes.size(); i++) {
         inputs[i] = bld.tmp(input_classes[i]);
         startpgm->definitions[i] = Definition(inputs[i]);
      }
      bld.insert(std::move(startpgm));
   }

   return true;
}

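/* The tests only record predecessor edges while building the CFG; this fills
 * in the matching linear/logical successor lists and, if endpgm is set,
 * terminates every block without linear successors with s_endpgm. */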
void
finish_program(Program* prog, bool endpgm)
{
   for (Block& BB : prog->blocks) {
      for (unsigned idx : BB.linear_preds)
         prog->blocks[idx].linear_succs.emplace_back(BB.index);
      for (unsigned idx : BB.logical_preds)
         prog->blocks[idx].logical_succs.emplace_back(BB.index);
   }

   if (endpgm) {
      for (Block& block : prog->blocks) {
         if (block.linear_succs.size() == 0) {
            block.kind |= block_kind_uniform;
            Builder(prog, &block).sopp(aco_opcode::s_endpgm);
         }
      }
   }
}

void
finish_validator_test()
{
   finish_program(program.get());
   aco_print_program(program.get(), output);
   fprintf(output, "Validation results:\n");
   if (aco::validate_ir(program.get()))
      fprintf(output, "Validation passed\n");
   else
      fprintf(output, "Validation failed\n");
}

void
finish_opt_test()
{
   finish_program(program.get());
   if (!aco::validate_ir(program.get())) {
      fail_test("Validation before optimization failed");
      return;
   }
   aco::optimize(program.get());
   if (!aco::validate_ir(program.get())) {
      fail_test("Validation after optimization failed");
      return;
   }
   aco_print_program(program.get(), output);
}

void
finish_setup_reduce_temp_test()
{
   finish_program(program.get());
   if (!aco::validate_ir(program.get())) {
      fail_test("Validation before setup_reduce_temp failed");
      return;
   }
   aco::setup_reduce_temp(program.get());
   if (!aco::validate_ir(program.get())) {
      fail_test("Validation after setup_reduce_temp failed");
      return;
   }
   aco_print_program(program.get(), output);
}

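/* Runs live-variable analysis and register allocation under the given policy,
 * validates the allocation and, when lower is set, also runs SSA elimination
 * and lowering to HW instructions before printing the program. */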
void
finish_ra_test(ra_test_policy policy, bool lower)
{
   finish_program(program.get());
   if (!aco::validate_ir(program.get())) {
      fail_test("Validation before register allocation failed");
      return;
   }

   program->workgroup_size = program->wave_size;
   aco::live live_vars = aco::live_var_analysis(program.get());
   aco::register_allocation(program.get(), live_vars.live_out, policy);

   if (aco::validate_ra(program.get())) {
      fail_test("Validation after register allocation failed");
      return;
   }

   if (lower) {
      aco::ssa_elimination(program.get());
      aco::lower_to_hw_instr(program.get());
   }

   aco_print_program(program.get(), output);
}

void
finish_optimizer_postRA_test()
{
   finish_program(program.get());
   aco::optimize_postRA(program.get());
   aco_print_program(program.get(), output);
}

void
finish_to_hw_instr_test()
{
   finish_program(program.get());
   aco::lower_to_hw_instr(program.get());
   aco_print_program(program.get(), output);
}

void
finish_schedule_vopd_test()
{
   finish_program(program.get());
   aco::schedule_vopd(program.get());
   aco_print_program(program.get(), output);
}

void
finish_waitcnt_test()
{
   finish_program(program.get());
   aco::insert_wait_states(program.get());
   aco_print_program(program.get(), output);
}

void
finish_insert_nops_test(bool endpgm)
{
   finish_program(program.get(), endpgm);
   aco::insert_NOPs(program.get());
   aco_print_program(program.get(), output);
}

void
finish_form_hard_clause_test()
{
   finish_program(program.get());
   aco::form_hard_clauses(program.get());
   aco_print_program(program.get(), output);
}

void
finish_assembler_test()
{
   finish_program(program.get());
   std::vector<uint32_t> binary;
   unsigned exec_size = emit_program(program.get(), binary);

   /* we could use CLRX for disassembly but that would require it to be
    * installed */
   if (program->gfx_level >= GFX8) {
      print_asm(program.get(), binary, exec_size / 4u, output);
   } else {
      // TODO: maybe we should use CLRX and skip this test if it's not available?
      for (uint32_t dword : binary)
         fprintf(output, "%.8x\n", dword);
   }
}

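/* Each writeout() overload emits a p_unit_test pseudo-instruction that tags
 * its operands with the index i, so the test's check directives can refer to
 * specific values in the printed program. */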
void
writeout(unsigned i, Temp tmp)
{
   if (tmp.id())
      bld.pseudo(aco_opcode::p_unit_test, Operand::c32(i), tmp);
   else
      bld.pseudo(aco_opcode::p_unit_test, Operand::c32(i));
}

void
writeout(unsigned i, aco::Builder::Result res)
{
   bld.pseudo(aco_opcode::p_unit_test, Operand::c32(i), res);
}

void
writeout(unsigned i, Operand op)
{
   bld.pseudo(aco_opcode::p_unit_test, Operand::c32(i), op);
}

void
writeout(unsigned i, Operand op0, Operand op1)
{
   bld.pseudo(aco_opcode::p_unit_test, Operand::c32(i), op0, op1);
}

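/* Small ALU builder helpers used by the tests. Most of them pick the 16-bit
 * or 32-bit opcode based on the size of the source temporary and return the
 * result as a Temp. */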
Temp
fneg(Temp src, Builder b)
{
   if (src.bytes() == 2)
      return b.vop2(aco_opcode::v_mul_f16, b.def(v2b), Operand::c16(0xbc00u), src);
   else
      return b.vop2(aco_opcode::v_mul_f32, b.def(v1), Operand::c32(0xbf800000u), src);
}

Temp
fabs(Temp src, Builder b)
{
   if (src.bytes() == 2) {
      Builder::Result res =
         b.vop2_e64(aco_opcode::v_mul_f16, b.def(v2b), Operand::c16(0x3c00), src);
      res->valu().abs[1] = true;
      return res;
   } else {
      Builder::Result res =
         b.vop2_e64(aco_opcode::v_mul_f32, b.def(v1), Operand::c32(0x3f800000u), src);
      res->valu().abs[1] = true;
      return res;
   }
}

Temp
f2f32(Temp src, Builder b)
{
   return b.vop1(aco_opcode::v_cvt_f32_f16, b.def(v1), src);
}

Temp
f2f16(Temp src, Builder b)
{
   return b.vop1(aco_opcode::v_cvt_f16_f32, b.def(v2b), src);
}

Temp
u2u16(Temp src, Builder b)
{
   return b.pseudo(aco_opcode::p_extract_vector, b.def(v2b), src, Operand::zero());
}

Temp
fadd(Temp src0, Temp src1, Builder b)
{
   if (src0.bytes() == 2)
      return b.vop2(aco_opcode::v_add_f16, b.def(v2b), src0, src1);
   else
      return b.vop2(aco_opcode::v_add_f32, b.def(v1), src0, src1);
}

Temp
fmul(Temp src0, Temp src1, Builder b)
{
   if (src0.bytes() == 2)
      return b.vop2(aco_opcode::v_mul_f16, b.def(v2b), src0, src1);
   else
      return b.vop2(aco_opcode::v_mul_f32, b.def(v1), src0, src1);
}

Temp
fma(Temp src0, Temp src1, Temp src2, Builder b)
{
   if (src0.bytes() == 2)
      return b.vop3(aco_opcode::v_fma_f16, b.def(v2b), src0, src1, src2);
   else
      return b.vop3(aco_opcode::v_fma_f32, b.def(v1), src0, src1, src2);
}

Temp
fsat(Temp src, Builder b)
{
   if (src.bytes() == 2)
      return b.vop3(aco_opcode::v_med3_f16, b.def(v2b), Operand::c16(0u), Operand::c16(0x3c00u),
                    src);
   else
      return b.vop3(aco_opcode::v_med3_f32, b.def(v1), Operand::zero(), Operand::c32(0x3f800000u),
                    src);
}

Temp
fmin(Temp src0, Temp src1, Builder b)
{
   return b.vop2(aco_opcode::v_min_f32, b.def(v1), src0, src1);
}

Temp
fmax(Temp src0, Temp src1, Builder b)
{
   return b.vop2(aco_opcode::v_max_f32, b.def(v1), src0, src1);
}

Temp
ext_ushort(Temp src, unsigned idx, Builder b)
{
   return b.pseudo(aco_opcode::p_extract, b.def(src.regClass()), src, Operand::c32(idx),
                   Operand::c32(16u), Operand::c32(false));
}

Temp
ext_ubyte(Temp src, unsigned idx, Builder b)
{
   return b.pseudo(aco_opcode::p_extract, b.def(src.regClass()), src, Operand::c32(idx),
                   Operand::c32(8u), Operand::c32(false));
}

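/* Builds the six-block CFG ACO uses for a divergent if/else: the branch block,
 * logical and linear "then" blocks, an invert block, logical and linear "else"
 * blocks and a merge block. exec is saved to a fixed SGPR around the construct,
 * the then/els callbacks emit their bodies between p_logical_start/end, and the
 * invert block flips exec for the else side before it is restored at the merge. */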
void
emit_divergent_if_else(Program* prog, aco::Builder& b, Operand cond, std::function<void()> then,
                       std::function<void()> els)
{
   prog->blocks.reserve(prog->blocks.size() + 6);

   Block* if_block = &prog->blocks.back();
   Block* then_logical = prog->create_and_insert_block();
   Block* then_linear = prog->create_and_insert_block();
   Block* invert = prog->create_and_insert_block();
   Block* else_logical = prog->create_and_insert_block();
   Block* else_linear = prog->create_and_insert_block();
   Block* endif_block = prog->create_and_insert_block();

   if_block->kind |= block_kind_branch;
   invert->kind |= block_kind_invert;
   endif_block->kind |= block_kind_merge | (if_block->kind & block_kind_top_level);

   /* Set up logical CF */
   then_logical->logical_preds.push_back(if_block->index);
   else_logical->logical_preds.push_back(if_block->index);
   endif_block->logical_preds.push_back(then_logical->index);
   endif_block->logical_preds.push_back(else_logical->index);

   /* Set up linear CF */
   then_logical->linear_preds.push_back(if_block->index);
   then_linear->linear_preds.push_back(if_block->index);
   invert->linear_preds.push_back(then_logical->index);
   invert->linear_preds.push_back(then_linear->index);
   else_logical->linear_preds.push_back(invert->index);
   else_linear->linear_preds.push_back(invert->index);
   endif_block->linear_preds.push_back(else_logical->index);
   endif_block->linear_preds.push_back(else_linear->index);

   PhysReg saved_exec_reg(84);

   b.reset(if_block);
   Temp saved_exec = b.sop1(Builder::s_and_saveexec, b.def(b.lm, saved_exec_reg),
                            Definition(scc, s1), Definition(exec, b.lm), cond, Operand(exec, b.lm));
   b.branch(aco_opcode::p_cbranch_nz, Definition(vcc, bld.lm), then_logical->index,
            then_linear->index);

   b.reset(then_logical);
   b.pseudo(aco_opcode::p_logical_start);
   then();
   b.pseudo(aco_opcode::p_logical_end);
   b.branch(aco_opcode::p_branch, Definition(vcc, bld.lm), invert->index);

   b.reset(then_linear);
   b.branch(aco_opcode::p_branch, Definition(vcc, bld.lm), invert->index);

   b.reset(invert);
   b.sop2(Builder::s_andn2, Definition(exec, bld.lm), Definition(scc, s1),
          Operand(saved_exec, saved_exec_reg), Operand(exec, bld.lm));
   b.branch(aco_opcode::p_cbranch_nz, Definition(vcc, bld.lm), else_logical->index,
            else_linear->index);

   b.reset(else_logical);
   b.pseudo(aco_opcode::p_logical_start);
   els();
   b.pseudo(aco_opcode::p_logical_end);
   b.branch(aco_opcode::p_branch, Definition(vcc, bld.lm), endif_block->index);

   b.reset(else_linear);
   b.branch(aco_opcode::p_branch, Definition(vcc, bld.lm), endif_block->index);

   b.reset(endif_block);
   b.pseudo(aco_opcode::p_parallelcopy, Definition(exec, bld.lm),
            Operand(saved_exec, saved_exec_reg));
}

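/* Maps a gfx level to a representative chip and defers to the radeon_family
 * overload below. */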
VkDevice
get_vk_device(enum amd_gfx_level gfx_level)
{
   enum radeon_family family;
   switch (gfx_level) {
   case GFX6: family = CHIP_TAHITI; break;
   case GFX7: family = CHIP_BONAIRE; break;
   case GFX8: family = CHIP_POLARIS10; break;
   case GFX9: family = CHIP_VEGA10; break;
   case GFX10: family = CHIP_NAVI10; break;
   case GFX10_3: family = CHIP_NAVI21; break;
   case GFX11: family = CHIP_NAVI31; break;
   default: family = CHIP_UNKNOWN; break;
   }
   return get_vk_device(family);
}

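/* Creates and caches a RADV instance and device for the given chip: the family
 * is selected by setting RADV_FORCE_FAMILY before vkCreateInstance, and the
 * FUNCTION_LIST entry points are resolved against the new instance. Cached
 * devices are torn down by the DestroyDevices destructor at process exit. */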
VkDevice
get_vk_device(enum radeon_family family)
{
   assert(family != CHIP_UNKNOWN);

   std::lock_guard<std::mutex> guard(create_device_mutex);

   if (device_cache[family])
      return device_cache[family];

   setenv("RADV_FORCE_FAMILY", ac_get_family_name(family), 1);

   VkApplicationInfo app_info = {};
   app_info.pApplicationName = "aco_tests";
   app_info.apiVersion = VK_API_VERSION_1_2;
   VkInstanceCreateInfo instance_create_info = {};
   instance_create_info.pApplicationInfo = &app_info;
   instance_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
   ASSERTED VkResult result = ((PFN_vkCreateInstance)vk_icdGetInstanceProcAddr(
      NULL, "vkCreateInstance"))(&instance_create_info, NULL, &instance_cache[family]);
   assert(result == VK_SUCCESS);

#define ITEM(n) n = (PFN_vk##n)vk_icdGetInstanceProcAddr(instance_cache[family], "vk" #n);
   FUNCTION_LIST
#undef ITEM

   uint32_t device_count = 1;
   VkPhysicalDevice device = VK_NULL_HANDLE;
   result = EnumeratePhysicalDevices(instance_cache[family], &device_count, &device);
   assert(result == VK_SUCCESS);
   assert(device != VK_NULL_HANDLE);

   VkDeviceCreateInfo device_create_info = {};
   device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
   static const char* extensions[] = {"VK_KHR_pipeline_executable_properties"};
   device_create_info.enabledExtensionCount = sizeof(extensions) / sizeof(extensions[0]);
   device_create_info.ppEnabledExtensionNames = extensions;
   result = CreateDevice(device, &device_create_info, NULL, &device_cache[family]);

   return device_cache[family];
}

static struct DestroyDevices {
   ~DestroyDevices()
   {
      for (unsigned i = 0; i < CHIP_LAST; i++) {
         if (!device_cache[i])
            continue;
         DestroyDevice(device_cache[i], NULL);
         DestroyInstance(instance_cache[i], NULL);
      }
   }
} destroy_devices;

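/* Fetches one of a shader's internal representations through
 * VK_KHR_pipeline_executable_properties using the usual two-call pattern: the
 * first query sizes the data, the second fills the allocated buffer. With
 * remove_encoding set, everything after ';' on each line (the raw instruction
 * encodings in the disassembly) is blanked out to keep reference output stable. */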
void
print_pipeline_ir(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits stages,
                  const char* name, bool remove_encoding)
{
   uint32_t executable_count = 16;
   VkPipelineExecutablePropertiesKHR executables[16];
   VkPipelineInfoKHR pipeline_info;
   pipeline_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
   pipeline_info.pNext = NULL;
   pipeline_info.pipeline = pipeline;
   ASSERTED VkResult result =
      GetPipelineExecutablePropertiesKHR(device, &pipeline_info, &executable_count, executables);
   assert(result == VK_SUCCESS);

   uint32_t executable = 0;
   for (; executable < executable_count; executable++) {
      if (executables[executable].stages == stages)
         break;
   }
   assert(executable != executable_count);

   VkPipelineExecutableInfoKHR exec_info;
   exec_info.sType = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR;
   exec_info.pNext = NULL;
   exec_info.pipeline = pipeline;
   exec_info.executableIndex = executable;

   uint32_t ir_count = 16;
   VkPipelineExecutableInternalRepresentationKHR ir[16];
   memset(ir, 0, sizeof(ir));
   result = GetPipelineExecutableInternalRepresentationsKHR(device, &exec_info, &ir_count, ir);
   assert(result == VK_SUCCESS);

   VkPipelineExecutableInternalRepresentationKHR* requested_ir = nullptr;
   for (unsigned i = 0; i < ir_count; ++i) {
      if (strcmp(ir[i].name, name) == 0) {
         requested_ir = &ir[i];
         break;
      }
   }
   assert(requested_ir && "Could not find requested IR");

   char* data = (char*)malloc(requested_ir->dataSize);
   requested_ir->pData = data;
   result = GetPipelineExecutableInternalRepresentationsKHR(device, &exec_info, &ir_count, ir);
   assert(result == VK_SUCCESS);

   if (remove_encoding) {
      for (char* c = data; *c; c++) {
         if (*c == ';') {
            for (; *c && *c != '\n'; c++)
               *c = ' ';
         }
      }
   }

   fprintf(output, "%s", data);
   free(data);
}

VkShaderModule
__qoCreateShaderModule(VkDevice dev, const QoShaderModuleCreateInfo* module_info)
{
   VkShaderModuleCreateInfo vk_module_info;
   vk_module_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
   vk_module_info.pNext = NULL;
   vk_module_info.flags = 0;
   vk_module_info.codeSize = module_info->spirvSize;
   vk_module_info.pCode = (const uint32_t*)module_info->pSpirv;

   VkShaderModule module;
   ASSERTED VkResult result = CreateShaderModule(dev, &vk_module_info, NULL, &module);
   assert(result == VK_SUCCESS);

   return module;
}

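/* PipelineBuilder collects shader stages plus the resources and IO they
 * declare, then lazily creates the matching Vulkan objects (descriptor set
 * layouts, pipeline layout, render pass and pipeline) the first time
 * print_ir() is called. A test might use it roughly like this (a sketch; the
 * QoShaderModuleCreateInfo values and the exact IR name reported by the
 * driver, e.g. "ACO IR", depend on the test macros in helpers.h and on RADV):
 *
 *    PipelineBuilder pbld(get_vk_device(GFX10));
 *    pbld.add_vsfs(vs, fs);
 *    pbld.print_ir(VK_SHADER_STAGE_FRAGMENT_BIT, "ACO IR", true);
 */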
PipelineBuilder::PipelineBuilder(VkDevice dev)
{
   memset(this, 0, sizeof(*this));
   topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
   device = dev;
}

PipelineBuilder::~PipelineBuilder()
{
   DestroyPipeline(device, pipeline, NULL);

   for (unsigned i = 0; i < (is_compute() ? 1 : gfx_pipeline_info.stageCount); i++) {
      VkPipelineShaderStageCreateInfo* stage_info = &stages[i];
      if (owned_stages & stage_info->stage)
         DestroyShaderModule(device, stage_info->module, NULL);
   }

   DestroyPipelineLayout(device, pipeline_layout, NULL);

   for (unsigned i = 0; i < util_bitcount64(desc_layouts_used); i++)
      DestroyDescriptorSetLayout(device, desc_layouts[i], NULL);

   DestroyRenderPass(device, render_pass, NULL);
}

void
PipelineBuilder::add_desc_binding(VkShaderStageFlags stage_flags, uint32_t layout, uint32_t binding,
                                  VkDescriptorType type, uint32_t count)
{
   desc_layouts_used |= 1ull << layout;
   desc_bindings[layout][num_desc_bindings[layout]++] = {binding, type, count, stage_flags, NULL};
}

void
PipelineBuilder::add_vertex_binding(uint32_t binding, uint32_t stride, VkVertexInputRate rate)
{
   vs_bindings[vs_input.vertexBindingDescriptionCount++] = {binding, stride, rate};
}

void
PipelineBuilder::add_vertex_attribute(uint32_t location, uint32_t binding, VkFormat format,
                                      uint32_t offset)
{
   vs_attributes[vs_input.vertexAttributeDescriptionCount++] = {location, binding, format, offset};
}

void
PipelineBuilder::add_resource_decls(QoShaderModuleCreateInfo* module)
{
   for (unsigned i = 0; i < module->declarationCount; i++) {
      const QoShaderDecl* decl = &module->pDeclarations[i];
      switch (decl->decl_type) {
      case QoShaderDeclType_ubo:
         add_desc_binding(module->stage, decl->set, decl->binding,
                          VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
         break;
      case QoShaderDeclType_ssbo:
         add_desc_binding(module->stage, decl->set, decl->binding,
                          VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
         break;
      case QoShaderDeclType_img_buf:
         add_desc_binding(module->stage, decl->set, decl->binding,
                          VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
         break;
      case QoShaderDeclType_img:
         add_desc_binding(module->stage, decl->set, decl->binding,
                          VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
         break;
      case QoShaderDeclType_tex_buf:
         add_desc_binding(module->stage, decl->set, decl->binding,
                          VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
         break;
      case QoShaderDeclType_combined:
         add_desc_binding(module->stage, decl->set, decl->binding,
                          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
         break;
      case QoShaderDeclType_tex:
         add_desc_binding(module->stage, decl->set, decl->binding,
                          VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
         break;
      case QoShaderDeclType_samp:
         add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_SAMPLER);
         break;
      default: break;
      }
   }
}

void
PipelineBuilder::add_io_decls(QoShaderModuleCreateInfo* module)
{
   unsigned next_vtx_offset = 0;
   for (unsigned i = 0; i < module->declarationCount; i++) {
      const QoShaderDecl* decl = &module->pDeclarations[i];
      switch (decl->decl_type) {
      case QoShaderDeclType_in:
         if (module->stage == VK_SHADER_STAGE_VERTEX_BIT) {
            if (!strcmp(decl->type, "float") || decl->type[0] == 'v')
               add_vertex_attribute(decl->location, 0, VK_FORMAT_R32G32B32A32_SFLOAT,
                                    next_vtx_offset);
            else if (decl->type[0] == 'u')
               add_vertex_attribute(decl->location, 0, VK_FORMAT_R32G32B32A32_UINT,
                                    next_vtx_offset);
            else if (decl->type[0] == 'i')
               add_vertex_attribute(decl->location, 0, VK_FORMAT_R32G32B32A32_SINT,
                                    next_vtx_offset);
            next_vtx_offset += 16;
         }
         break;
      case QoShaderDeclType_out:
         if (module->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
            if (!strcmp(decl->type, "float") || decl->type[0] == 'v')
               color_outputs[decl->location] = VK_FORMAT_R32G32B32A32_SFLOAT;
            else if (decl->type[0] == 'u')
               color_outputs[decl->location] = VK_FORMAT_R32G32B32A32_UINT;
            else if (decl->type[0] == 'i')
               color_outputs[decl->location] = VK_FORMAT_R32G32B32A32_SINT;
         }
         break;
      default: break;
      }
   }
   if (next_vtx_offset)
      add_vertex_binding(0, next_vtx_offset);
}

void
PipelineBuilder::add_stage(VkShaderStageFlagBits stage, VkShaderModule module, const char* name)
{
   VkPipelineShaderStageCreateInfo* stage_info;
   if (stage == VK_SHADER_STAGE_COMPUTE_BIT)
      stage_info = &stages[0];
   else
      stage_info = &stages[gfx_pipeline_info.stageCount++];
   stage_info->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
   stage_info->pNext = NULL;
   stage_info->flags = 0;
   stage_info->stage = stage;
   stage_info->module = module;
   stage_info->pName = name;
   stage_info->pSpecializationInfo = NULL;
   owned_stages |= stage;
}

void
PipelineBuilder::add_stage(VkShaderStageFlagBits stage, QoShaderModuleCreateInfo module,
                           const char* name)
{
   add_stage(stage, __qoCreateShaderModule(device, &module), name);
   add_resource_decls(&module);
   add_io_decls(&module);
}

void
PipelineBuilder::add_vsfs(VkShaderModule vs, VkShaderModule fs)
{
   add_stage(VK_SHADER_STAGE_VERTEX_BIT, vs);
   add_stage(VK_SHADER_STAGE_FRAGMENT_BIT, fs);
}

void
PipelineBuilder::add_vsfs(QoShaderModuleCreateInfo vs, QoShaderModuleCreateInfo fs)
{
   add_stage(VK_SHADER_STAGE_VERTEX_BIT, vs);
   add_stage(VK_SHADER_STAGE_FRAGMENT_BIT, fs);
}

void
PipelineBuilder::add_cs(VkShaderModule cs)
{
   add_stage(VK_SHADER_STAGE_COMPUTE_BIT, cs);
}

void
PipelineBuilder::add_cs(QoShaderModuleCreateInfo cs)
{
   add_stage(VK_SHADER_STAGE_COMPUTE_BIT, cs);
}

bool
PipelineBuilder::is_compute()
{
   return gfx_pipeline_info.stageCount == 0;
}

void
PipelineBuilder::create_compute_pipeline()
{
   VkComputePipelineCreateInfo create_info;
   create_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
   create_info.pNext = NULL;
   create_info.flags = VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;
   create_info.stage = stages[0];
   create_info.layout = pipeline_layout;
   create_info.basePipelineHandle = VK_NULL_HANDLE;
   create_info.basePipelineIndex = 0;

   ASSERTED VkResult result =
      CreateComputePipelines(device, VK_NULL_HANDLE, 1, &create_info, NULL, &pipeline);
   assert(result == VK_SUCCESS);
}

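/* Builds a minimal graphics pipeline around the collected stages: one render
 * pass with an attachment per declared color output (plus depth/stencil if
 * used), mostly fixed-function defaults, and the
 * CAPTURE_INTERNAL_REPRESENTATIONS flag so the IR can be queried afterwards. */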
void
PipelineBuilder::create_graphics_pipeline()
{
   /* create the create infos */
   if (!samples)
      samples = VK_SAMPLE_COUNT_1_BIT;

   unsigned num_color_attachments = 0;
   VkPipelineColorBlendAttachmentState blend_attachment_states[16];
   VkAttachmentReference color_attachments[16];
   VkAttachmentDescription attachment_descs[17];
   for (unsigned i = 0; i < 16; i++) {
      if (color_outputs[i] == VK_FORMAT_UNDEFINED)
         continue;

      VkAttachmentDescription* desc = &attachment_descs[num_color_attachments];
      desc->flags = 0;
      desc->format = color_outputs[i];
      desc->samples = samples;
      desc->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->initialLayout = VK_IMAGE_LAYOUT_GENERAL;
      desc->finalLayout = VK_IMAGE_LAYOUT_GENERAL;

      VkAttachmentReference* ref = &color_attachments[num_color_attachments];
      ref->attachment = num_color_attachments;
      ref->layout = VK_IMAGE_LAYOUT_GENERAL;

      VkPipelineColorBlendAttachmentState* blend = &blend_attachment_states[num_color_attachments];
      blend->blendEnable = false;
      blend->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                              VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;

      num_color_attachments++;
   }

   unsigned num_attachments = num_color_attachments;
   VkAttachmentReference ds_attachment;
   if (ds_output != VK_FORMAT_UNDEFINED) {
      VkAttachmentDescription* desc = &attachment_descs[num_attachments];
      desc->flags = 0;
      desc->format = ds_output;
      desc->samples = samples;
      desc->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->initialLayout = VK_IMAGE_LAYOUT_GENERAL;
      desc->finalLayout = VK_IMAGE_LAYOUT_GENERAL;

      ds_attachment.attachment = num_color_attachments;
      ds_attachment.layout = VK_IMAGE_LAYOUT_GENERAL;

      num_attachments++;
   }

   vs_input.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
   vs_input.pNext = NULL;
   vs_input.flags = 0;
   vs_input.pVertexBindingDescriptions = vs_bindings;
   vs_input.pVertexAttributeDescriptions = vs_attributes;

   VkPipelineInputAssemblyStateCreateInfo assembly_state;
   assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
   assembly_state.pNext = NULL;
   assembly_state.flags = 0;
   assembly_state.topology = topology;
   assembly_state.primitiveRestartEnable = false;

   VkPipelineTessellationStateCreateInfo tess_state;
   tess_state.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
   tess_state.pNext = NULL;
   tess_state.flags = 0;
   tess_state.patchControlPoints = patch_size;

   VkPipelineViewportStateCreateInfo viewport_state;
   viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
   viewport_state.pNext = NULL;
   viewport_state.flags = 0;
   viewport_state.viewportCount = 1;
   viewport_state.pViewports = NULL;
   viewport_state.scissorCount = 1;
   viewport_state.pScissors = NULL;

   VkPipelineRasterizationStateCreateInfo rasterization_state;
   rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
   rasterization_state.pNext = NULL;
   rasterization_state.flags = 0;
   rasterization_state.depthClampEnable = false;
   rasterization_state.rasterizerDiscardEnable = false;
   rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
   rasterization_state.cullMode = VK_CULL_MODE_NONE;
   rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
   rasterization_state.depthBiasEnable = false;
   rasterization_state.lineWidth = 1.0;

   VkPipelineMultisampleStateCreateInfo ms_state;
   ms_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
   ms_state.pNext = NULL;
   ms_state.flags = 0;
   ms_state.rasterizationSamples = samples;
   ms_state.sampleShadingEnable = sample_shading_enable;
   ms_state.minSampleShading = min_sample_shading;
   VkSampleMask sample_mask = 0xffffffff;
   ms_state.pSampleMask = &sample_mask;
   ms_state.alphaToCoverageEnable = false;
   ms_state.alphaToOneEnable = false;

   VkPipelineDepthStencilStateCreateInfo ds_state;
   ds_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
   ds_state.pNext = NULL;
   ds_state.flags = 0;
   ds_state.depthTestEnable = ds_output != VK_FORMAT_UNDEFINED;
   ds_state.depthWriteEnable = true;
   ds_state.depthCompareOp = VK_COMPARE_OP_ALWAYS;
   ds_state.depthBoundsTestEnable = false;
   ds_state.stencilTestEnable = true;
   ds_state.front.failOp = VK_STENCIL_OP_KEEP;
   ds_state.front.passOp = VK_STENCIL_OP_REPLACE;
   ds_state.front.depthFailOp = VK_STENCIL_OP_REPLACE;
   ds_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
   ds_state.front.compareMask = 0xffffffff;
   ds_state.front.writeMask = 0;
   ds_state.front.reference = 0;
   ds_state.back = ds_state.front;

   VkPipelineColorBlendStateCreateInfo color_blend_state;
   color_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
   color_blend_state.pNext = NULL;
   color_blend_state.flags = 0;
   color_blend_state.logicOpEnable = false;
   color_blend_state.attachmentCount = num_color_attachments;
   color_blend_state.pAttachments = blend_attachment_states;

   VkDynamicState dynamic_states[9] = {VK_DYNAMIC_STATE_VIEWPORT,
                                       VK_DYNAMIC_STATE_SCISSOR,
                                       VK_DYNAMIC_STATE_LINE_WIDTH,
                                       VK_DYNAMIC_STATE_DEPTH_BIAS,
                                       VK_DYNAMIC_STATE_BLEND_CONSTANTS,
                                       VK_DYNAMIC_STATE_DEPTH_BOUNDS,
                                       VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
                                       VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
                                       VK_DYNAMIC_STATE_STENCIL_REFERENCE};

   VkPipelineDynamicStateCreateInfo dynamic_state;
   dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
   dynamic_state.pNext = NULL;
   dynamic_state.flags = 0;
   dynamic_state.dynamicStateCount = sizeof(dynamic_states) / sizeof(VkDynamicState);
   dynamic_state.pDynamicStates = dynamic_states;

   gfx_pipeline_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
   gfx_pipeline_info.pNext = NULL;
   gfx_pipeline_info.flags = VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;
   gfx_pipeline_info.pVertexInputState = &vs_input;
   gfx_pipeline_info.pInputAssemblyState = &assembly_state;
   gfx_pipeline_info.pTessellationState = &tess_state;
   gfx_pipeline_info.pViewportState = &viewport_state;
   gfx_pipeline_info.pRasterizationState = &rasterization_state;
   gfx_pipeline_info.pMultisampleState = &ms_state;
   gfx_pipeline_info.pDepthStencilState = &ds_state;
   gfx_pipeline_info.pColorBlendState = &color_blend_state;
   gfx_pipeline_info.pDynamicState = &dynamic_state;
   gfx_pipeline_info.subpass = 0;

   /* create the objects used to create the pipeline */
   VkSubpassDescription subpass;
   subpass.flags = 0;
   subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
   subpass.inputAttachmentCount = 0;
   subpass.pInputAttachments = NULL;
   subpass.colorAttachmentCount = num_color_attachments;
   subpass.pColorAttachments = color_attachments;
   subpass.pResolveAttachments = NULL;
   subpass.pDepthStencilAttachment = ds_output == VK_FORMAT_UNDEFINED ? NULL : &ds_attachment;
   subpass.preserveAttachmentCount = 0;
   subpass.pPreserveAttachments = NULL;

   VkRenderPassCreateInfo renderpass_info;
   renderpass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
   renderpass_info.pNext = NULL;
   renderpass_info.flags = 0;
   renderpass_info.attachmentCount = num_attachments;
   renderpass_info.pAttachments = attachment_descs;
   renderpass_info.subpassCount = 1;
   renderpass_info.pSubpasses = &subpass;
   renderpass_info.dependencyCount = 0;
   renderpass_info.pDependencies = NULL;

   ASSERTED VkResult result = CreateRenderPass(device, &renderpass_info, NULL, &render_pass);
   assert(result == VK_SUCCESS);

   gfx_pipeline_info.layout = pipeline_layout;
   gfx_pipeline_info.renderPass = render_pass;

   /* create the pipeline */
   gfx_pipeline_info.pStages = stages;

   result = CreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &gfx_pipeline_info, NULL, &pipeline);
   assert(result == VK_SUCCESS);
}

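/* Creates the descriptor set layouts and pipeline layout from the recorded
 * bindings, then dispatches to the compute or graphics path. */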
void
PipelineBuilder::create_pipeline()
{
   unsigned num_desc_layouts = 0;
   for (unsigned i = 0; i < 64; i++) {
      if (!(desc_layouts_used & (1ull << i)))
         continue;

      VkDescriptorSetLayoutCreateInfo desc_layout_info;
      desc_layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
      desc_layout_info.pNext = NULL;
      desc_layout_info.flags = 0;
      desc_layout_info.bindingCount = num_desc_bindings[i];
      desc_layout_info.pBindings = desc_bindings[i];

      ASSERTED VkResult result = CreateDescriptorSetLayout(device, &desc_layout_info, NULL,
                                                           &desc_layouts[num_desc_layouts]);
      assert(result == VK_SUCCESS);
      num_desc_layouts++;
   }

   VkPipelineLayoutCreateInfo pipeline_layout_info;
   pipeline_layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
   pipeline_layout_info.pNext = NULL;
   pipeline_layout_info.flags = 0;
   pipeline_layout_info.pushConstantRangeCount = 1;
   pipeline_layout_info.pPushConstantRanges = &push_constant_range;
   pipeline_layout_info.setLayoutCount = num_desc_layouts;
   pipeline_layout_info.pSetLayouts = desc_layouts;

   ASSERTED VkResult result =
      CreatePipelineLayout(device, &pipeline_layout_info, NULL, &pipeline_layout);
   assert(result == VK_SUCCESS);

   if (is_compute())
      create_compute_pipeline();
   else
      create_graphics_pipeline();
}

void
PipelineBuilder::print_ir(VkShaderStageFlagBits stage_flags, const char* name, bool remove_encoding)
{
   if (!pipeline)
      create_pipeline();
   print_pipeline_ir(device, pipeline, stage_flags, name, remove_encoding);
}