/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#include "compiler/shader_enums.h"
#include "compiler/spirv/nir_spirv.h"
#include "nir/nir.h"
#include "rogue.h"
#include "rogue_build_data.h"
#include "rogue_compiler.h"
#include "rogue_constreg.h"
#include "rogue_encode.h"
#include "rogue_nir.h"
#include "rogue_nir_helpers.h"
#include "rogue_operand.h"
#include "rogue_regalloc.h"
#include "rogue_shader.h"
#include "rogue_validate.h"
#include "util/macros.h"
#include "util/memstream.h"
#include "util/ralloc.h"

/**
 * \file rogue.c
 *
 * \brief Contains the top-level Rogue compiler interface for the Vulkan driver
 * and the offline compiler.
 */

/**
 * \brief Converts a SPIR-V shader to NIR.
 *
 * \param[in] ctx Shared multi-stage build context.
 * \param[in] stage Shader stage.
 * \param[in] entry Name of the entry point function.
 * \param[in] spirv_size SPIR-V data length in DWORDs.
 * \param[in] spirv_data SPIR-V data.
 * \param[in] num_spec Number of SPIR-V specializations.
 * \param[in] spec SPIR-V specializations.
 * \return A nir_shader* if successful, or NULL if unsuccessful.
 */
nir_shader *rogue_spirv_to_nir(struct rogue_build_ctx *ctx,
                               gl_shader_stage stage,
                               const char *entry,
                               size_t spirv_size,
                               const uint32_t *spirv_data,
                               unsigned num_spec,
                               struct nir_spirv_specialization *spec)
{
   nir_shader *nir;

   nir = spirv_to_nir(spirv_data,
                      spirv_size,
                      spec,
                      num_spec,
                      stage,
                      entry,
                      rogue_get_spirv_options(ctx->compiler),
                      rogue_get_compiler_options(ctx->compiler));
   if (!nir)
      return NULL;

   ralloc_steal(ctx, nir);

   /* Apply passes. */
   if (!rogue_nir_passes(ctx, nir, stage)) {
      ralloc_free(nir);
      return NULL;
   }

   /* Collect I/O data to pass back to the driver. */
   if (!rogue_collect_io_data(ctx, nir)) {
      ralloc_free(nir);
      return NULL;
   }

   return nir;
}
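
/* A minimal usage sketch (hypothetical caller, not part of this file): the
 * driver hands over the SPIR-V words for one stage and gets back a NIR shader
 * owned by the build context; spirv_words/spirv_word_count are placeholder
 * names.
 *
 *    nir_shader *nir = rogue_spirv_to_nir(ctx,
 *                                         MESA_SHADER_FRAGMENT,
 *                                         "main",
 *                                         spirv_word_count,
 *                                         spirv_words,
 *                                         0,
 *                                         NULL);
 *    if (!nir)
 *       return NULL;
 */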

/**
 * \brief Converts a Rogue shader to binary.
 *
 * \param[in] ctx Shared multi-stage build context.
 * \param[in] shader Rogue shader.
 * \return A rogue_shader_binary* if successful, or NULL if unsuccessful.
 */
struct rogue_shader_binary *rogue_to_binary(struct rogue_build_ctx *ctx,
                                            const struct rogue_shader *shader)
{
   struct rogue_shader_binary *binary;
   struct u_memstream mem;
   size_t buf_size;
   char *buf;

   if (!rogue_validate_shader(shader))
      return NULL;

   if (!u_memstream_open(&mem, &buf, &buf_size))
      return NULL;

   if (!rogue_encode_shader(shader, u_memstream_get(&mem))) {
      u_memstream_close(&mem);
      free(buf);
      return NULL;
   }

   u_memstream_close(&mem);

   binary = rzalloc_size(ctx, sizeof(*binary) + buf_size);
   if (!binary) {
      free(buf);
      return NULL;
   }

   binary->size = buf_size;
   memcpy(binary->data, buf, buf_size);

   free(buf);

   return binary;
}
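
/* The allocation above implies rogue_shader_binary is a size-prefixed
 * flexible-array-member struct along these lines (the actual definition lives
 * in a rogue header; this is only an illustrative sketch):
 *
 *    struct rogue_shader_binary {
 *       size_t size;     // Length of data in bytes.
 *       uint8_t data[];  // Encoded instruction stream.
 *    };
 *
 * so a single ralloc'd block owned by ctx holds both the header and the
 * encoded instructions.
 */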

static bool
setup_alu_dest(struct rogue_instr *instr, size_t dest_index, nir_alu_instr *alu)
{
   assert(dest_index == 0);

   /* Dest validation. */
   assert(nir_dest_num_components(alu->dest.dest) == 1 ||
          nir_dest_num_components(alu->dest.dest) == 4);
   assert(nir_dest_bit_size(alu->dest.dest) == 32);

   size_t nir_dest_reg = nir_alu_dest_regindex(alu);

   if (nir_dest_num_components(alu->dest.dest) == 1) {
      CHECK(rogue_instr_set_operand_vreg(instr, dest_index, nir_dest_reg));
   } else {
      size_t comp = nir_alu_dest_comp(alu);
      CHECK(rogue_instr_set_operand_vreg_vec(instr,
                                             dest_index,
                                             comp,
                                             nir_dest_reg));
   }

   return true;
}

static bool trans_constreg_operand(struct rogue_instr *instr,
                                   size_t operand_index,
                                   uint32_t const_value)
{
   size_t const_reg = rogue_constreg_lookup(const_value);

   /* Only values that can be sourced from const regs should remain after the
    * rogue_nir_constreg pass.
    */
   assert(const_reg != ROGUE_NO_CONST_REG);

   CHECK(rogue_instr_set_operand_reg(instr,
                                     operand_index,
                                     ROGUE_OPERAND_TYPE_REG_CONST,
                                     const_reg));

   return true;
}
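
/* Usage sketch: a constant the hardware can source directly resolves to a
 * const reg, so no MOV_IMM is needed (the register number returned is
 * hardware-specific; the value below is just the IEEE-754 bit pattern of
 * 1.0f as an illustration):
 *
 *    size_t reg = rogue_constreg_lookup(0x3f800000);
 *    assert(reg != ROGUE_NO_CONST_REG);
 */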

static bool trans_nir_alu_fmax(struct rogue_shader *shader, nir_alu_instr *alu)
{
   /* Src validation. */
   assert(nir_src_num_components(alu->src[0].src) == 1);
   assert(nir_src_bit_size(alu->src[0].src) == 32);

   assert(nir_src_num_components(alu->src[1].src) == 1);
   assert(nir_src_bit_size(alu->src[1].src) == 32);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MAX);

   CHECK(setup_alu_dest(instr, 0, alu));

   for (size_t u = 0; u < nir_op_infos[nir_op_fmax].num_inputs; ++u) {
      /* Handle values that can be pulled from const regs. */
      if (nir_alu_src_is_const(alu, u)) {
         CHECK(trans_constreg_operand(instr, u + 1, nir_alu_src_const(alu, u)));
         continue;
      }

      size_t nir_src_reg = nir_alu_src_regindex(alu, u);

      CHECK(rogue_instr_set_operand_vreg(instr, u + 1, nir_src_reg));
   }

   return true;
}

static bool trans_nir_alu_fmin(struct rogue_shader *shader, nir_alu_instr *alu)
{
   /* Src validation. */
   assert(nir_src_num_components(alu->src[0].src) == 1);
   assert(nir_src_bit_size(alu->src[0].src) == 32);

   assert(nir_src_num_components(alu->src[1].src) == 1);
   assert(nir_src_bit_size(alu->src[1].src) == 32);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MIN);

   CHECK(setup_alu_dest(instr, 0, alu));

   for (size_t u = 0; u < nir_op_infos[nir_op_fmin].num_inputs; ++u) {
      /* Handle values that can be pulled from const regs. */
      if (nir_alu_src_is_const(alu, u)) {
         CHECK(trans_constreg_operand(instr, u + 1, nir_alu_src_const(alu, u)));
         continue;
      }

      size_t nir_src_reg = nir_alu_src_regindex(alu, u);

      CHECK(rogue_instr_set_operand_vreg(instr, u + 1, nir_src_reg));
   }

   return true;
}

static bool trans_nir_alu_mov_imm(struct rogue_shader *shader,
                                  nir_alu_instr *alu)
{
   /* Src validation. */
   assert(nir_src_num_components(alu->src[0].src) == 1);
   assert(nir_src_bit_size(alu->src[0].src) == 32);

   uint32_t value = nir_alu_src_const(alu, 0);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MOV_IMM);

   CHECK(setup_alu_dest(instr, 0, alu));
   CHECK(rogue_instr_set_operand_imm(instr, 1, value));

   return true;
}

static bool trans_nir_alu_mov(struct rogue_shader *shader, nir_alu_instr *alu)
{
   /* Constant value that isn't in constregs. */
   if (nir_alu_src_is_const(alu, 0) &&
       nir_dest_num_components(alu->dest.dest) == 1)
      return trans_nir_alu_mov_imm(shader, alu);

   /* Src validation. */
   assert(nir_src_num_components(alu->src[0].src) == 1);
   assert(nir_src_bit_size(alu->src[0].src) == 32);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MOV);

   CHECK(setup_alu_dest(instr, 0, alu));

   /* Handle values that can be pulled from const regs. */
   if (nir_alu_src_is_const(alu, 0)) {
      return trans_constreg_operand(instr, 1, nir_alu_src_const(alu, 0));
   }

   size_t nir_src_reg = nir_alu_src_regindex(alu, 0);
   CHECK(rogue_instr_set_operand_vreg(instr, 1, nir_src_reg));

   return true;
}

static bool trans_nir_alu_pack_unorm_4x8(struct rogue_shader *shader,
                                         nir_alu_instr *alu)
{
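   /* nir_op_pack_unorm_4x8 packs a vec4 of floats in [0.0, 1.0] into one
    * 32-bit word, one byte per component (round(clamp(x, 0, 1) * 255));
    * ROGUE_OP_PACK_U8888 is assumed here to perform the same conversion in
    * hardware.
    */
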
   /* Src/dest validation. */
   assert(nir_dest_num_components(alu->dest.dest) == 1);
   assert(nir_dest_bit_size(alu->dest.dest) == 32);

   assert(nir_src_num_components(alu->src[0].src) == 4);
   assert(nir_src_bit_size(alu->src[0].src) == 32);

   size_t nir_src_reg = nir_alu_src_regindex(alu, 0);
   size_t nir_dest_reg = nir_alu_dest_regindex(alu);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_PACK_U8888);

   CHECK(rogue_instr_set_operand_vreg(instr, 0, nir_dest_reg));

   /* Ensure all 4 components are being sourced in order. */
   for (size_t u = 0; u < nir_src_num_components(alu->src[0].src); ++u)
      assert(alu->src->swizzle[u] == u);

   CHECK(rogue_instr_set_operand_vreg_vec(instr,
                                          1,
                                          ROGUE_COMPONENT_ALL,
                                          nir_src_reg));

   return true;
}

static bool trans_nir_alu_fmul(struct rogue_shader *shader, nir_alu_instr *alu)
{
   /* Src validation. */
   assert(nir_src_num_components(alu->src[0].src) == 1);
   assert(nir_src_bit_size(alu->src[0].src) == 32);

   assert(nir_src_num_components(alu->src[1].src) == 1);
   assert(nir_src_bit_size(alu->src[1].src) == 32);

   size_t nir_in_reg_a = nir_alu_src_regindex(alu, 0);
   size_t nir_in_reg_b = nir_alu_src_regindex(alu, 1);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MUL);

   CHECK(setup_alu_dest(instr, 0, alu));
   CHECK(rogue_instr_set_operand_vreg(instr, 1, nir_in_reg_a));
   CHECK(rogue_instr_set_operand_vreg(instr, 2, nir_in_reg_b));

   return true;
}

static bool trans_nir_alu_ffma(struct rogue_shader *shader, nir_alu_instr *alu)
{
   /* Src validation. */
   assert(nir_src_num_components(alu->src[0].src) == 1);
   assert(nir_src_bit_size(alu->src[0].src) == 32);

   assert(nir_src_num_components(alu->src[1].src) == 1);
   assert(nir_src_bit_size(alu->src[1].src) == 32);

   assert(nir_src_num_components(alu->src[2].src) == 1);
   assert(nir_src_bit_size(alu->src[2].src) == 32);

   size_t nir_in_reg_a = nir_alu_src_regindex(alu, 0);
   size_t nir_in_reg_b = nir_alu_src_regindex(alu, 1);
   size_t nir_in_reg_c = nir_alu_src_regindex(alu, 2);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_FMA);

   CHECK(setup_alu_dest(instr, 0, alu));
   CHECK(rogue_instr_set_operand_vreg(instr, 1, nir_in_reg_a));
   CHECK(rogue_instr_set_operand_vreg(instr, 2, nir_in_reg_b));
   CHECK(rogue_instr_set_operand_vreg(instr, 3, nir_in_reg_c));

   return true;
}

static bool trans_nir_alu(struct rogue_shader *shader, nir_alu_instr *alu)
{
   switch (alu->op) {
   case nir_op_fmax:
      return trans_nir_alu_fmax(shader, alu);

   case nir_op_fmin:
      return trans_nir_alu_fmin(shader, alu);

   case nir_op_pack_unorm_4x8:
      return trans_nir_alu_pack_unorm_4x8(shader, alu);

   case nir_op_mov:
      return trans_nir_alu_mov(shader, alu);

   case nir_op_fmul:
      return trans_nir_alu_fmul(shader, alu);

   case nir_op_ffma:
      return trans_nir_alu_ffma(shader, alu);

   default:
      break;
   }

   unreachable("Unimplemented NIR ALU instruction.");
}

static bool trans_nir_intrinsic_load_input_fs(struct rogue_shader *shader,
                                              nir_intrinsic_instr *intr)
{
   struct rogue_fs_build_data *fs_data = &shader->ctx->stage_data.fs;

   /* Src/dest validation. */
   assert(nir_dest_num_components(intr->dest) == 1);
   assert(nir_dest_bit_size(intr->dest) == 32);

   assert(nir_src_num_components(intr->src[0]) == 1);
   assert(nir_src_bit_size(intr->src[0]) == 32);
   assert(nir_intr_src_is_const(intr, 0));

   /* Intrinsic index validation. */
   assert(nir_intrinsic_dest_type(intr) == nir_type_float32);

   struct nir_io_semantics io_semantics = nir_intrinsic_io_semantics(intr);
   size_t component = nir_intrinsic_component(intr);
   size_t coeff_index = rogue_coeff_index_fs(&fs_data->iterator_args,
                                             io_semantics.location,
                                             component);
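   /* A location of ~0 appears to request the special W coefficient, which
    * the pixel iterator below needs for perspective correction.
    */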
   size_t wcoeff_index = rogue_coeff_index_fs(&fs_data->iterator_args, ~0, 0);
   size_t drc_num = rogue_acquire_drc(shader);
   uint64_t source_count = nir_dest_num_components(intr->dest);

   size_t nir_dest_reg = nir_intr_dest_regindex(intr);

   /* pixiter.w instruction. */
   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_PIX_ITER_W);

   CHECK(rogue_instr_set_operand_vreg(instr, 0, nir_dest_reg));
   CHECK(rogue_instr_set_operand_drc(instr, 1, drc_num));
   CHECK(rogue_instr_set_operand_reg(instr,
                                     2,
                                     ROGUE_OPERAND_TYPE_REG_COEFF,
                                     coeff_index));
   CHECK(rogue_instr_set_operand_reg(instr,
                                     3,
                                     ROGUE_OPERAND_TYPE_REG_COEFF,
                                     wcoeff_index));
   CHECK(rogue_instr_set_operand_imm(instr, 4, source_count));

   /* wdf instruction must follow the pixiter.w. */
   instr = rogue_shader_insert(shader, ROGUE_OP_WDF);

   CHECK(rogue_instr_set_operand_drc(instr, 0, drc_num));
   rogue_release_drc(shader, drc_num);

   return true;
}

static bool trans_nir_intrinsic_load_input_vs(struct rogue_shader *shader,
                                              nir_intrinsic_instr *intr)
{
   /* Src/dest validation. */
   assert(nir_dest_num_components(intr->dest) == 1);
   assert(nir_dest_bit_size(intr->dest) == 32);

   assert(nir_src_num_components(intr->src[0]) == 1);
   assert(nir_src_bit_size(intr->src[0]) == 32);
   assert(nir_intr_src_is_const(intr, 0));

   /* Intrinsic index validation. */
   assert(nir_intrinsic_dest_type(intr) == nir_type_float32);

   size_t component = nir_intrinsic_component(intr);
   struct nir_io_semantics io_semantics = nir_intrinsic_io_semantics(intr);
   size_t vi_reg_index = ((io_semantics.location - VERT_ATTRIB_GENERIC0) * 3) +
                         component; /* TODO: get these properly with the
                                     * intrinsic index (ssa argument)
                                     */

   size_t nir_dest_reg = nir_intr_dest_regindex(intr);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MOV);

   CHECK(rogue_instr_set_operand_vreg(instr, 0, nir_dest_reg));
   CHECK(rogue_instr_set_operand_reg(instr,
                                     1,
                                     ROGUE_OPERAND_TYPE_REG_VERTEX_IN,
                                     vi_reg_index));

   return true;
}

static bool trans_nir_intrinsic_load_input(struct rogue_shader *shader,
                                           nir_intrinsic_instr *intr)
{
   switch (shader->stage) {
   case MESA_SHADER_FRAGMENT:
      return trans_nir_intrinsic_load_input_fs(shader, intr);

   case MESA_SHADER_VERTEX:
      return trans_nir_intrinsic_load_input_vs(shader, intr);

   default:
      break;
   }

   unreachable("Unimplemented NIR load_input variant.");
}

static bool trans_nir_intrinsic_store_output_fs(struct rogue_shader *shader,
                                                nir_intrinsic_instr *intr)
{
   /* Src/dest validation. */
   assert(nir_src_num_components(intr->src[0]) == 1);
   assert(nir_src_bit_size(intr->src[0]) == 32);
   assert(!nir_intr_src_is_const(intr, 0));

   assert(nir_src_num_components(intr->src[1]) == 1);
   assert(nir_src_bit_size(intr->src[1]) == 32);
   assert(nir_intr_src_is_const(intr, 1));

   /* Intrinsic index validation. */
   assert(nir_intrinsic_src_type(intr) == nir_type_uint32);

   /* Fetch the output offset. */
   /* TODO: Is this really the right value to use for pixel out reg. num? */
   size_t offset = nir_intr_src_const(intr, 1);

   /* Fetch the components. */
   size_t src_reg = nir_intr_src_regindex(intr, 0);

   /* mov.olchk instruction. */
   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MOV);

   CHECK(rogue_instr_set_operand_reg(instr,
                                     0,
                                     ROGUE_OPERAND_TYPE_REG_PIXEL_OUT,
                                     offset));
   CHECK(rogue_instr_set_operand_vreg(instr, 1, src_reg));
   CHECK(rogue_instr_set_flag(instr, ROGUE_INSTR_FLAG_OLCHK));

   return true;
}

static bool trans_nir_intrinsic_store_output_vs(struct rogue_shader *shader,
                                                nir_intrinsic_instr *intr)
{
   struct rogue_vs_build_data *vs_data = &shader->ctx->stage_data.vs;

   /* Src/dest validation. */
   assert(nir_src_num_components(intr->src[0]) == 1);
   assert(nir_src_bit_size(intr->src[0]) == 32);
   assert(!nir_intr_src_is_const(intr, 0));

   assert(nir_src_num_components(intr->src[1]) == 1);
   assert(nir_src_bit_size(intr->src[1]) == 32);
   assert(nir_intr_src_is_const(intr, 1));

   /* Intrinsic index validation. */
   assert(nir_intrinsic_src_type(intr) == nir_type_float32);
   assert(util_bitcount(nir_intrinsic_write_mask(intr)) == 1);

   struct nir_io_semantics io_semantics = nir_intrinsic_io_semantics(intr);
   size_t component = nir_intrinsic_component(intr);
   size_t vo_index = rogue_output_index_vs(&vs_data->outputs,
                                           io_semantics.location,
                                           component);

   size_t src_reg = nir_intr_src_regindex(intr, 0);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_VTXOUT);

   CHECK(rogue_instr_set_operand_imm(instr, 0, vo_index));
   CHECK(rogue_instr_set_operand_vreg(instr, 1, src_reg));

   return true;
}

static bool trans_nir_intrinsic_store_output(struct rogue_shader *shader,
                                             nir_intrinsic_instr *intr)
{
   switch (shader->stage) {
   case MESA_SHADER_FRAGMENT:
      return trans_nir_intrinsic_store_output_fs(shader, intr);

   case MESA_SHADER_VERTEX:
      return trans_nir_intrinsic_store_output_vs(shader, intr);

   default:
      break;
   }

   unreachable("Unimplemented NIR store_output variant.");
}

static bool trans_nir_intrinsic_load_ubo(struct rogue_shader *shader,
                                         nir_intrinsic_instr *intr)
{
   struct rogue_ubo_data *ubo_data =
      &shader->ctx->common_data[shader->stage].ubo_data;

   /* Src/dest validation. */
   assert(nir_dest_num_components(intr->dest) == 1);
   assert(nir_dest_bit_size(intr->dest) == 32);

   assert(nir_src_num_components(intr->src[0]) == 2);
   assert(nir_src_bit_size(intr->src[0]) == 32);
   assert(nir_intr_src_is_const(intr, 0));

   assert(nir_src_num_components(intr->src[1]) == 1);
   assert(nir_src_bit_size(intr->src[1]) == 32);
   assert(nir_intr_src_is_const(intr, 1));

   /* Intrinsic index validation. */
   assert((nir_intrinsic_range_base(intr) % ROGUE_REG_SIZE_BYTES) == 0);
   assert(nir_intrinsic_range(intr) == ROGUE_REG_SIZE_BYTES);

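   /* Each load_ubo has been lowered to a single register-sized (32-bit)
    * access, and the UBO contents are assumed to have been uploaded to
    * shared registers, so the load reduces to a MOV from the shared
    * register that rogue_ubo_reg() maps this (descriptor set, binding,
    * offset) triple to.
    */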
   size_t nir_dest_reg = nir_intr_dest_regindex(intr);

   size_t desc_set = nir_intr_src_comp_const(intr, 0, 0);
   size_t binding = nir_intr_src_comp_const(intr, 0, 1);
   size_t offset = nir_intrinsic_range_base(intr);

   size_t sh_num = rogue_ubo_reg(ubo_data, desc_set, binding, offset);

   struct rogue_instr *instr = rogue_shader_insert(shader, ROGUE_OP_MOV);

   CHECK(rogue_instr_set_operand_vreg(instr, 0, nir_dest_reg));
   CHECK(rogue_instr_set_operand_reg(instr,
                                     1,
                                     ROGUE_OPERAND_TYPE_REG_SHARED,
                                     sh_num));
   return true;
}

static bool trans_nir_intrinsic(struct rogue_shader *shader,
                                nir_intrinsic_instr *intr)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_load_input:
      return trans_nir_intrinsic_load_input(shader, intr);

   case nir_intrinsic_store_output:
      return trans_nir_intrinsic_store_output(shader, intr);

   case nir_intrinsic_load_ubo:
      return trans_nir_intrinsic_load_ubo(shader, intr);

   default:
      break;
   }

   unreachable("Unimplemented NIR intrinsic instruction.");
}

static bool trans_nir_load_const(struct rogue_shader *shader,
                                 nir_load_const_instr *load_const)
{
   /* Src/dest validation. */
   assert(load_const->def.bit_size == 32);

   /* Ensure that two-component load_consts are used only by load_ubos. */
   if (load_const->def.num_components == 2) {
      nir_foreach_use (use_src, &load_const->def) {
         nir_instr *instr = use_src->parent_instr;
         assert(instr->type == nir_instr_type_intrinsic);

         ASSERTED nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         assert(intr->intrinsic == nir_intrinsic_load_ubo);
      }
   } else {
      assert(load_const->def.num_components == 1);
   }

   /* TODO: This is currently done in MOV_IMM, but this would instead be the
    * time to look up the constant value, check whether it lives in const
    * regs, or whether it needs a MOV_IMM generated (or to be constant
    * calc-ed).
    */
   return true;
}

static bool trans_nir_jump_return(struct rogue_shader *shader,
                                  nir_jump_instr *jump)
{
   enum rogue_opcode return_op;

   switch (shader->stage) {
   case MESA_SHADER_FRAGMENT:
      return_op = ROGUE_OP_END_FRAG;
      break;

   case MESA_SHADER_VERTEX:
      return_op = ROGUE_OP_END_VERT;
      break;

   default:
      unreachable("Unimplemented NIR return instruction type.");
   }

   rogue_shader_insert(shader, return_op);

   return true;
}

static bool trans_nir_jump(struct rogue_shader *shader, nir_jump_instr *jump)
{
   switch (jump->type) {
   case nir_jump_return:
      return trans_nir_jump_return(shader, jump);

   default:
      break;
   }

   unreachable("Unimplemented NIR jump instruction type.");
}

/**
 * \brief Converts a NIR shader to Rogue.
 *
 * \param[in] ctx Shared multi-stage build context.
 * \param[in] nir NIR shader.
 * \return A rogue_shader* if successful, or NULL if unsuccessful.
 */
struct rogue_shader *rogue_nir_to_rogue(struct rogue_build_ctx *ctx,
                                        const nir_shader *nir)
{
   gl_shader_stage stage = nir->info.stage;
   struct rogue_shader *shader = rogue_shader_create(ctx, stage);
   if (!shader)
      return NULL;

   /* Make sure we only have a single function. */
   assert(exec_list_length(&nir->functions) == 1);

   /* Translate shader entrypoint. */
   nir_function_impl *entry = nir_shader_get_entrypoint((nir_shader *)nir);
   nir_foreach_block (block, entry) {
      nir_foreach_instr (instr, block) {
         switch (instr->type) {
         case nir_instr_type_alu:
            /* TODO: Cleanup on failure. */
            CHECKF(trans_nir_alu(shader, nir_instr_as_alu(instr)),
                   "Failed to translate NIR ALU instruction.");
            break;

         case nir_instr_type_intrinsic:
            CHECKF(trans_nir_intrinsic(shader, nir_instr_as_intrinsic(instr)),
                   "Failed to translate NIR intrinsic instruction.");
            break;

         case nir_instr_type_load_const:
            CHECKF(trans_nir_load_const(shader, nir_instr_as_load_const(instr)),
                   "Failed to translate NIR load_const instruction.");
            break;

         case nir_instr_type_jump:
            CHECKF(trans_nir_jump(shader, nir_instr_as_jump(instr)),
                   "Failed to translate NIR jump instruction.");
            break;

         default:
            unreachable("Unimplemented NIR instruction type.");
         }
      }
   }

   /* Perform register allocation. */
   /* TODO: handle failure. */
   if (!rogue_ra_alloc(&shader->instr_list,
                       shader->ra,
                       &ctx->common_data[stage].temps,
                       &ctx->common_data[stage].internals))
      return NULL;

   return shader;
}
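
/* Usage sketch (hypothetical caller): translation sits between NIR lowering
 * and binary encoding, so a typical sequence is:
 *
 *    struct rogue_shader *shader = rogue_nir_to_rogue(ctx, nir);
 *    if (!shader)
 *       return NULL;
 *
 *    struct rogue_shader_binary *binary = rogue_to_binary(ctx, shader);
 */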

/**
 * \brief Creates and sets up a shared multi-stage build context.
 *
 * \param[in] compiler The compiler context.
 * \return A pointer to the new build context, or NULL on failure.
 */
struct rogue_build_ctx *
rogue_create_build_context(struct rogue_compiler *compiler)
{
   struct rogue_build_ctx *ctx;

   ctx = rzalloc_size(compiler, sizeof(*ctx));
   if (!ctx)
      return NULL;

   ctx->compiler = compiler;

   /* nir/rogue/binary shaders need to be default-zeroed;
    * this is taken care of by rzalloc_size.
    */

   /* Setup non-zero defaults. */
   ctx->stage_data.fs.msaa_mode = ROGUE_MSAA_MODE_PIXEL;

   return ctx;
}
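
/* Putting the pieces together, a driver-side compile path might look like the
 * following (hypothetical sketch: error handling, stage iteration and
 * specialization constants elided; spirv_words/spirv_word_count are
 * placeholder names):
 *
 *    struct rogue_build_ctx *ctx = rogue_create_build_context(compiler);
 *    nir_shader *nir = rogue_spirv_to_nir(ctx, stage, "main",
 *                                         spirv_word_count, spirv_words,
 *                                         0, NULL);
 *    struct rogue_shader *shader = rogue_nir_to_rogue(ctx, nir);
 *    struct rogue_shader_binary *binary = rogue_to_binary(ctx, shader);
 *    ...
 *    // Freeing the context frees the NIR, Rogue IR and binary in one go.
 *    ralloc_free(ctx);
 */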
790