/*
 * Copyright © 2022 Collabora Ltd.
 * SPDX-License-Identifier: MIT
 */
#include "mme_builder.h"

#include <stdio.h>
#include <stdlib.h>

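/* Number of MME GPRs handed to the register allocator.  The encoding below
 * accepts R0-R23, but only the first 23 are made available for allocation
 * here.
 */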
#define MME_TU104_MAX_REGS 23

void
mme_tu104_builder_init(struct mme_builder *b)
{
   mme_reg_alloc_init(&b->reg_alloc, BITFIELD_MASK(MME_TU104_MAX_REGS));
}

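/* Each TU104 MME instruction bundles up to two ALU ops, two 16-bit
 * immediates, two parameter loads, and two outputs (method/emit pairs).
 * tb->inst_parts tracks which of those slots are already claimed in the
 * instruction currently being assembled.  This starts a fresh instruction,
 * initialized to a no-op, with no parts claimed.
 */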
static void
mme_tu104_new_inst(struct mme_tu104_builder *tb)
{
   struct mme_tu104_inst noop = { MME_TU104_INST_DEFAULTS };
   assert(tb->inst_count < ARRAY_SIZE(tb->insts));
   tb->insts[tb->inst_count] = noop;
   tb->inst_count++;
   tb->inst_parts = 0;
}

static struct mme_tu104_inst *
mme_tu104_cur_inst(struct mme_tu104_builder *tb)
{
   assert(tb->inst_count > 0 && tb->inst_count < ARRAY_SIZE(tb->insts));
   return &tb->insts[tb->inst_count - 1];
}

static inline void
mme_tu104_set_inst_parts(struct mme_tu104_builder *tb,
                         enum mme_tu104_instr_parts parts)
{
   assert(!(tb->inst_parts & parts));
   tb->inst_parts |= parts;
}

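/* Append a fully pre-built instruction.  If the current instruction already
 * has parts claimed (or no instruction is open yet), a new one is started
 * first; a fresh instruction is opened afterwards so later packing never
 * modifies the copied one.
 */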
void
mme_tu104_add_inst(struct mme_builder *b,
                   const struct mme_tu104_inst *inst)
{
   struct mme_tu104_builder *tb = &b->tu104;

   if (tb->inst_parts || tb->inst_count == 0)
      mme_tu104_new_inst(&b->tu104);
   *mme_tu104_cur_inst(tb) = *inst;
   mme_tu104_new_inst(tb);
}

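/* Number of 16-bit immediate slots a source register encoding consumes:
 * IMM and IMMPAIR take one, IMM32 takes both.
 */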
static unsigned
mme_tu104_reg_num_imms(enum mme_tu104_reg reg)
{
   switch (reg) {
   case MME_TU104_REG_IMM:
   case MME_TU104_REG_IMMPAIR:
      return 1;
   case MME_TU104_REG_IMM32:
      return 2;
   default:
      return 0;
   }
}

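/* Returns true if the given ALU op can still be packed into the current
 * instruction: an ALU slot, enough immediate slots, and a load slot must be
 * free, the op must not depend on an output already queued in this
 * instruction, must not have a data dependency on alu0, and a few
 * hardware-specific pairing rules must hold.
 */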
static bool
mme_tu104_next_inst_can_add_alu(struct mme_tu104_builder *tb,
                                const struct mme_tu104_alu *alu,
                                bool must_be_alu0)
{
   if (tb->inst_count == 0)
      return false;

   /* Most ALU ops can be re-ordered with respect to outputs but a couple
    * can't.  In the case where it may depend on an output, flush if we have
    * one.
    */
   if (mme_tu104_alu_op_may_depend_on_mthd(alu->op) &&
       tb->inst_parts & (MME_TU104_INSTR_PART_MTHD0 |
                         MME_TU104_INSTR_PART_EMIT0 |
                         MME_TU104_INSTR_PART_MTHD1 |
                         MME_TU104_INSTR_PART_EMIT1))
      return false;

   if (must_be_alu0 && (tb->inst_parts & MME_TU104_INSTR_PART_ALU0))
      return false;

   if (tb->inst_parts & MME_TU104_INSTR_PART_ALU1) {
      assert(tb->inst_parts & MME_TU104_INSTR_PART_ALU0);
      return false;
   }

   assert(alu->src[0] != MME_TU104_REG_LOAD1 &&
          alu->src[1] != MME_TU104_REG_LOAD0 &&
          alu->src[1] != MME_TU104_REG_LOAD1);
   if (alu->src[0] == MME_TU104_REG_LOAD0 &&
       (tb->inst_parts & MME_TU104_INSTR_PART_LOAD1))
      return false;

   const unsigned used_imms =
      util_bitcount(tb->inst_parts & (MME_TU104_INSTR_PART_IMM0 |
                                      MME_TU104_INSTR_PART_IMM1));

   const unsigned num_imms = mme_tu104_alu_op_has_implicit_imm(alu->op) +
                             mme_tu104_reg_num_imms(alu->src[0]) +
                             mme_tu104_reg_num_imms(alu->src[1]);
   assert(num_imms <= 2);
   if (num_imms + used_imms > 2)
      return false;

   if (mme_tu104_alu_op_has_implicit_imm(alu->op) &&
       (tb->inst_parts & MME_TU104_INSTR_PART_ALU0) &&
       (tb->inst_parts & MME_TU104_INSTR_PART_IMM1))
      return false;

   struct mme_tu104_inst *cur = mme_tu104_cur_inst(tb);

   if ((tb->inst_parts & MME_TU104_INSTR_PART_ALU0) &&
       mme_tu104_alus_have_dependency(&cur->alu[0], alu))
      return false;

   /* No idea why the HW has this rule but it does */
   if (alu->op == MME_TU104_ALU_OP_STATE &&
       (tb->inst_parts & MME_TU104_INSTR_PART_ALU0) &&
       cur->alu[0].op == MME_TU104_ALU_OP_STATE)
      return false;

   return true;
}

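/* Pack an ALU op into the current instruction, starting a new one if it
 * doesn't fit, and claim the immediate and load slots it needs.  Returns the
 * ALU index (0 or 1) the op landed in.  ADDC/SUBB/MULH must sit in alu1,
 * immediately following their ADD/SUB/MUL(U) partner in alu0.
 */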
static unsigned
mme_tu104_push_alu(struct mme_tu104_builder *tb,
                   const struct mme_tu104_alu *alu,
                   uint16_t imm0, uint16_t imm1,
                   uint16_t implicit_imm,
                   bool must_be_alu0)
{
   if (!mme_tu104_next_inst_can_add_alu(tb, alu, must_be_alu0))
      mme_tu104_new_inst(tb);

   if (mme_tu104_alu_op_has_implicit_imm(alu->op) &&
       (tb->inst_parts & MME_TU104_INSTR_PART_IMM0))
      tb->inst_parts |= MME_TU104_INSTR_PART_ALU0;

   assert(mme_tu104_next_inst_can_add_alu(tb, alu, must_be_alu0));

   struct mme_tu104_inst *inst = mme_tu104_cur_inst(tb);
   unsigned alu_idx = (tb->inst_parts & MME_TU104_INSTR_PART_ALU0) != 0;
   assert(alu_idx == 0 || !must_be_alu0);

   switch (alu->op) {
   case MME_TU104_ALU_OP_ADDC:
      assert(inst->alu[0].op == MME_TU104_ALU_OP_ADD);
      assert(alu_idx == 1);
      break;
   case MME_TU104_ALU_OP_SUBB:
      assert(inst->alu[0].op == MME_TU104_ALU_OP_SUB);
      assert(alu_idx == 1);
      break;
   case MME_TU104_ALU_OP_MULH:
      assert(inst->alu[0].op == MME_TU104_ALU_OP_MUL ||
             inst->alu[0].op == MME_TU104_ALU_OP_MULU);
      assert(alu_idx == 1);
      break;
   default:
      break;
   }

   mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_ALU0 << alu_idx);
   inst->alu[alu_idx] = *alu;

   if (alu->src[0] == MME_TU104_REG_LOAD0) {
      unsigned next_load = (tb->inst_parts & MME_TU104_INSTR_PART_LOAD0) != 0;
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_LOAD0 << next_load);
      inst->alu[alu_idx].src[0] = MME_TU104_REG_LOAD0 + next_load;
   }

   unsigned next_imm = (tb->inst_parts & MME_TU104_INSTR_PART_IMM0) != 0;
   const unsigned num_imms = mme_tu104_reg_num_imms(alu->src[0]) +
                             mme_tu104_reg_num_imms(alu->src[1]);

   if (mme_tu104_alu_op_has_implicit_imm(alu->op)) {
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_IMM0 << alu_idx);
      inst->imm[alu_idx] = implicit_imm;
      assert(num_imms <= 1);
      next_imm = 1 - alu_idx;
   }

   if (num_imms == 1) {
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_IMM0 << next_imm);
      inst->imm[next_imm] = imm0;
      assert(alu->src[0] != MME_TU104_REG_IMM32 &&
             alu->src[0] != MME_TU104_REG_IMMPAIR &&
             alu->src[1] != MME_TU104_REG_IMM32 &&
             alu->src[1] != MME_TU104_REG_IMMPAIR);
      if (alu->src[0] == MME_TU104_REG_IMM && alu_idx != next_imm)
         inst->alu[alu_idx].src[0] = MME_TU104_REG_IMMPAIR;
      if (alu->src[1] == MME_TU104_REG_IMM && alu_idx != next_imm)
         inst->alu[alu_idx].src[1] = MME_TU104_REG_IMMPAIR;
   } else if (num_imms == 2) {
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_IMM0 |
                                   MME_TU104_INSTR_PART_IMM1);
      inst->imm[0] = imm0;
      inst->imm[1] = imm1;
   }

   return alu_idx;
}

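/* Translate an abstract mme_value into the TU104 source encoding: the zero
 * register, a 16-bit or 32-bit immediate, or a GPR.
 */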
static inline enum mme_tu104_reg
mme_value_alu_reg(struct mme_value val)
{
   switch (val.type) {
   case MME_VALUE_TYPE_ZERO:
      return MME_TU104_REG_ZERO;
   case MME_VALUE_TYPE_IMM:
      if (val.imm == 0)
         return MME_TU104_REG_ZERO;
      else if (val.imm == (uint32_t)(int16_t)val.imm)
         return MME_TU104_REG_IMM;
      else
         return MME_TU104_REG_IMM32;
   case MME_VALUE_TYPE_REG:
      assert(val.reg <= 23);
      return MME_TU104_REG_R0 + val.reg;
   }
   unreachable("Invalid value type");
}

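/* Lower a single ALU op: pick source encodings, fall back to mme_mov() when
 * the operands would need more immediate bits than one instruction provides,
 * then split the immediates across the instruction's two 16-bit slots and
 * push the ALU.
 */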
static void
build_alu_to(struct mme_builder *b,
             struct mme_value dst,
             enum mme_tu104_alu_op op,
             struct mme_value x,
             struct mme_value y,
             uint16_t implicit_imm,
             bool must_be_alu0)
{
   assert(dst.type == MME_VALUE_TYPE_ZERO ||
          dst.type == MME_VALUE_TYPE_REG);

   enum mme_tu104_reg x_reg = mme_value_alu_reg(x);
   enum mme_tu104_reg y_reg = mme_value_alu_reg(y);

   if (x_reg == MME_TU104_REG_IMM32 && y_reg == MME_TU104_REG_IMM32) {
      y = mme_mov(b, y);
      y_reg = mme_value_alu_reg(y);
   }

   if (mme_tu104_alu_op_has_implicit_imm(op) &&
       (x_reg == MME_TU104_REG_IMM32 ||
        (x_reg == MME_TU104_REG_IMM && y_reg == MME_TU104_REG_IMM))) {
      x = mme_mov(b, x);
      x_reg = mme_value_alu_reg(x);
   }

   uint16_t imm0 = 0, imm1 = 0;
   if (x_reg == MME_TU104_REG_IMM32) {
      assert(mme_tu104_reg_num_imms(y_reg) == 0);
      imm0 = x.imm >> 16;
      imm1 = x.imm;
   } else if (y_reg == MME_TU104_REG_IMM32) {
      assert(mme_tu104_reg_num_imms(x_reg) == 0);
      imm0 = y.imm >> 16;
      imm1 = y.imm;
   } else if (x_reg == MME_TU104_REG_IMM) {
      assert(mme_tu104_reg_num_imms(y_reg) <= 1);
      imm0 = x.imm;
      if (y_reg == MME_TU104_REG_IMM) {
         imm1 = y.imm;
         y_reg = MME_TU104_REG_IMMPAIR;
      }
   } else if (y_reg == MME_TU104_REG_IMM) {
      imm0 = y.imm;
   } else {
      assert(mme_tu104_reg_num_imms(x_reg) == 0);
      assert(mme_tu104_reg_num_imms(y_reg) == 0);
   }

   struct mme_tu104_alu alu = {
      .dst = mme_value_alu_reg(dst),
      .op = op,
      .src = { x_reg, y_reg },
   };
   mme_tu104_push_alu(&b->tu104, &alu, imm0, imm1, implicit_imm, must_be_alu0);
}

static enum mme_tu104_alu_op
mme_to_tu104_alu_op(enum mme_alu_op op)
{
   switch (op) {
#define ALU_CASE(op) case MME_ALU_OP_##op: return MME_TU104_ALU_OP_##op;
   ALU_CASE(ADD)
   ALU_CASE(ADDC)
   ALU_CASE(SUB)
   ALU_CASE(SUBB)
   ALU_CASE(MUL)
   ALU_CASE(MULH)
   ALU_CASE(MULU)
   ALU_CASE(CLZ)
   ALU_CASE(SLL)
   ALU_CASE(SRL)
   ALU_CASE(SRA)
   ALU_CASE(AND)
   ALU_CASE(NAND)
   ALU_CASE(OR)
   ALU_CASE(XOR)
   ALU_CASE(SLT)
   ALU_CASE(SLTU)
   ALU_CASE(SLE)
   ALU_CASE(SLEU)
   ALU_CASE(SEQ)
   ALU_CASE(DREAD)
   ALU_CASE(DWRITE)
#undef ALU_CASE
   default:
      unreachable("Unsupported MME ALU op");
   }
}

void
mme_tu104_alu_to(struct mme_builder *b,
                 struct mme_value dst,
                 enum mme_alu_op op,
                 struct mme_value x,
                 struct mme_value y)
{
   build_alu_to(b, dst, mme_to_tu104_alu_op(op), x, y, 0, false);
}

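/* 64-bit ops are lowered as a lo/hi pair.  The lo op is forced into alu0 so
 * the hi op in alu1 of the same instruction can consume its carry/borrow or
 * high multiply bits.
 */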
void
mme_tu104_alu64_to(struct mme_builder *b,
                   struct mme_value64 dst,
                   enum mme_alu_op op_lo,
                   enum mme_alu_op op_hi,
                   struct mme_value64 x,
                   struct mme_value64 y)
{
   assert(dst.lo.type == MME_VALUE_TYPE_REG);
   assert(dst.hi.type == MME_VALUE_TYPE_REG);

   /* We can't have any non-zero immediates in the high part or else we might
    * get half-way through emitting and realize we've run out.
    */
   if (x.hi.type == MME_VALUE_TYPE_IMM && x.hi.imm != 0)
      x.hi = mme_mov(b, x.hi);
   if (y.hi.type == MME_VALUE_TYPE_IMM && y.hi.imm != 0)
      y.hi = mme_mov(b, y.hi);

   build_alu_to(b, dst.lo, mme_to_tu104_alu_op(op_lo), x.lo, y.lo, 0, true);
   build_alu_to(b, dst.hi, mme_to_tu104_alu_op(op_hi), x.hi, y.hi, 0, false);
}

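/* MERGE's implicit immediate packs the bit-field descriptor:
 * dst_pos in bits 10-14, bits in bits 5-9, src_pos in bits 0-4.
 */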
void
mme_tu104_merge_to(struct mme_builder *b, struct mme_value dst,
                   struct mme_value x, struct mme_value y,
                   uint16_t dst_pos, uint16_t bits, uint16_t src_pos)
{
   assert(dst_pos < 32);
   assert(bits < 32);
   assert(src_pos < 32);
   uint32_t ctrl = (dst_pos << 10) | (bits << 5) | src_pos;
   build_alu_to(b, dst, MME_TU104_ALU_OP_MERGE, x, y, ctrl, false);
}

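/* STATE reads back the current (shadowed) value of a method; method
 * addresses are in dwords, hence the state >> 2.
 */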
void
mme_tu104_state_arr_to(struct mme_builder *b, struct mme_value dst,
                       uint16_t state, struct mme_value index)
{
   assert(state % 4 == 0);
   build_alu_to(b, dst, MME_TU104_ALU_OP_STATE,
                mme_imm(state >> 2), index, 0, false);
}

void
mme_tu104_load_barrier(struct mme_builder *b)
{
   build_alu_to(b, mme_zero(), MME_TU104_ALU_OP_EXTENDED,
                mme_imm(0x1000), mme_imm(1), 0, false);
}

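/* Read the next macro parameter dword into dst, lowered as
 * dst = LOAD0 + ZERO.
 */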
void
mme_tu104_load_to(struct mme_builder *b, struct mme_value dst)
{
   assert(dst.type == MME_VALUE_TYPE_REG ||
          dst.type == MME_VALUE_TYPE_ZERO);

   struct mme_tu104_alu alu = {
      .dst = mme_value_alu_reg(dst),
      .op = MME_TU104_ALU_OP_ADD,
      .src = {
         MME_TU104_REG_LOAD0,
         MME_TU104_REG_ZERO,
      },
   };
   mme_tu104_push_alu(&b->tu104, &alu, 0, 0, 0, false);
}

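/* A method (address) update fits in the current instruction as long as a
 * mthd slot is free and, when the address comes from an immediate, an
 * immediate slot remains.
 */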
static bool
mme_tu104_next_inst_can_add_mthd(struct mme_tu104_builder *tb,
                                 enum mme_tu104_out_op out)
{
   if (tb->inst_count == 0)
      return false;

   if (tb->inst_parts & MME_TU104_INSTR_PART_MTHD1) {
      assert(tb->inst_parts & MME_TU104_INSTR_PART_MTHD0);
      return false;
   }

   if (out == MME_TU104_OUT_OP_IMM0 &&
       (tb->inst_parts & MME_TU104_INSTR_PART_IMM0) &&
       (tb->inst_parts & MME_TU104_INSTR_PART_IMM1))
      return false;

   return true;
}

static void
mme_tu104_push_mthd(struct mme_tu104_builder *tb,
                    enum mme_tu104_out_op out,
                    uint16_t imm)
{
   struct mme_tu104_inst *inst = mme_tu104_cur_inst(tb);
   if (out == MME_TU104_OUT_OP_IMM0) {
      unsigned imm_idx = (tb->inst_parts & MME_TU104_INSTR_PART_IMM0) != 0;
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_IMM0 << imm_idx);
      out = MME_TU104_OUT_OP_IMM0 + imm_idx;
      inst->imm[imm_idx] = imm;
   }
   unsigned mthd_idx = (tb->inst_parts & MME_TU104_INSTR_PART_MTHD0) != 0;
   /* If we're pushing mthd1, the next emit MUST be emit1 */
   if (mthd_idx > 0 && !(tb->inst_parts & MME_TU104_INSTR_PART_EMIT0))
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_EMIT0);
   mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_MTHD0 << mthd_idx);
   inst->out[mthd_idx].mthd = out;
}

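/* Set the method (register) address that subsequent emits will write.  The
 * method word holds the dword address in its low 12 bits with the
 * auto-increment step encoded above it (one dword here), so back-to-back
 * emits advance to consecutive methods.  A register index is added in via an
 * ALU; an immediate index is folded directly into the method word.
 */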
void
mme_tu104_mthd(struct mme_builder *b, uint16_t mthd, struct mme_value index)
{
   struct mme_tu104_builder *tb = &b->tu104;

   assert(mthd % 4 == 0);
   uint32_t mthd_imm = (1 << 12) | (mthd >> 2);

   if (index.type == MME_VALUE_TYPE_REG) {
      if (!mme_tu104_next_inst_can_add_mthd(tb, MME_TU104_OUT_OP_ALU0))
         mme_tu104_new_inst(tb);

      const struct mme_tu104_alu alu = {
         .dst = MME_TU104_REG_ZERO,
         .op = MME_TU104_ALU_OP_ADD,
         .src = {
            MME_TU104_REG_IMM,
            mme_value_alu_reg(index),
         },
      };
      unsigned alu_idx = mme_tu104_push_alu(tb, &alu, mthd_imm, 0, 0, false);
      mme_tu104_push_mthd(tb, MME_TU104_OUT_OP_ALU0 + alu_idx, 0);
   } else {
      if (!mme_tu104_next_inst_can_add_mthd(tb, MME_TU104_OUT_OP_IMM0))
         mme_tu104_new_inst(tb);

      if (index.type == MME_VALUE_TYPE_IMM)
         mthd_imm += index.imm;

      mme_tu104_push_mthd(tb, MME_TU104_OUT_OP_IMM0, mthd_imm);
   }
}

static bool
mme_tu104_next_inst_can_add_emit(struct mme_tu104_builder *tb,
                                 enum mme_tu104_out_op out,
                                 uint32_t imm)
{
   assert(tb->inst_count > 0);

   if (tb->inst_parts & MME_TU104_INSTR_PART_EMIT1) {
      assert(tb->inst_parts & MME_TU104_INSTR_PART_EMIT0);
      return false;
   }

   const unsigned used_imms =
      util_bitcount(tb->inst_parts & (MME_TU104_INSTR_PART_IMM0 |
                                      MME_TU104_INSTR_PART_IMM1));
   if (out == MME_TU104_OUT_OP_IMM0 && used_imms > 1)
      return false;
   if (out == MME_TU104_OUT_OP_IMM32 && used_imms > 0)
      return false;

   return true;
}

static void
mme_tu104_push_emit(struct mme_tu104_builder *tb,
                    enum mme_tu104_out_op out,
                    uint32_t imm)
{
   struct mme_tu104_inst *inst = mme_tu104_cur_inst(tb);
   if (out == MME_TU104_OUT_OP_IMM0) {
      unsigned imm_idx = (tb->inst_parts & MME_TU104_INSTR_PART_IMM0) != 0;
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_IMM0 << imm_idx);
      out = MME_TU104_OUT_OP_IMM0 + imm_idx;
      inst->imm[imm_idx] = imm;
   } else if (out == MME_TU104_OUT_OP_IMM32) {
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_IMM0 |
                                   MME_TU104_INSTR_PART_IMM1);
      inst->imm[0] = imm >> 16;
      inst->imm[1] = imm;
   }
   unsigned emit_idx = (tb->inst_parts & MME_TU104_INSTR_PART_EMIT0) != 0;
   mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_EMIT0 << emit_idx);
   /* If we're pushing emitN, the next mthd MUST be mthdN+1 */
   if (!(tb->inst_parts & (MME_TU104_INSTR_PART_MTHD0 << emit_idx)))
      mme_tu104_set_inst_parts(tb, MME_TU104_INSTR_PART_MTHD0 << emit_idx);
   inst->out[emit_idx].emit = out;
}

static int
find_alu_idx_for_dst(const struct mme_tu104_inst *inst,
                     struct mme_value dst)
{
   assert(dst.type == MME_VALUE_TYPE_REG);
   for (int i = 0; i < 2; i++) {
      if (inst->alu[i].dst == mme_value_alu_reg(dst))
         return i;
   }
   return -1;
}

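/* Emit one data dword to the current method.  A register value is routed
 * through an ALU output, reusing an ALU in this instruction that already
 * writes that register when possible; zero and immediates go out through the
 * immediate slots (IMM0 for 16-bit values, IMM32 for full 32-bit ones).
 */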
void
mme_tu104_emit(struct mme_builder *b, struct mme_value data)
{
   struct mme_tu104_builder *tb = &b->tu104;

   if (data.type == MME_VALUE_TYPE_REG) {
      if (!mme_tu104_next_inst_can_add_emit(tb, MME_TU104_OUT_OP_ALU0, 0))
         mme_tu104_new_inst(tb);

      struct mme_tu104_inst *inst = mme_tu104_cur_inst(tb);
      int alu_idx = find_alu_idx_for_dst(inst, data);
      if (alu_idx < 0) {
         const struct mme_tu104_alu alu = {
            .dst = MME_TU104_REG_ZERO,
            .op = MME_TU104_ALU_OP_ADD,
            .src = {
               mme_value_alu_reg(data),
               MME_TU104_REG_ZERO,
            },
         };
         alu_idx = mme_tu104_push_alu(tb, &alu, 0, 0, 0, false);
      }
      mme_tu104_push_emit(tb, MME_TU104_OUT_OP_ALU0 + alu_idx, 0);
   } else {
      enum mme_tu104_out_op out;
      uint32_t imm;
      if (data.type == MME_VALUE_TYPE_ZERO) {
         out = MME_TU104_OUT_OP_IMM0;
         imm = 0;
      } else {
         assert(data.type == MME_VALUE_TYPE_IMM);
         imm = data.imm;
         out = data.imm == (uint16_t)data.imm ? MME_TU104_OUT_OP_IMM0 :
                                                MME_TU104_OUT_OP_IMM32;
      }
      if (!mme_tu104_next_inst_can_add_emit(tb, out, 0))
         mme_tu104_new_inst(tb);

      mme_tu104_push_emit(tb, out, imm);
   }
}

static enum mme_tu104_alu_op
mme_cmp_to_tu104_branch_op(enum mme_cmp_op op)
{
   switch (op) {
#define CMP_CASE(op) case MME_CMP_OP_##op: return MME_TU104_ALU_OP_B##op;
   CMP_CASE(LT)
   CMP_CASE(LTU)
   CMP_CASE(LE)
   CMP_CASE(LEU)
   CMP_CASE(EQ)
#undef CMP_CASE
   default:
      unreachable("Unsupported MME CMP op");
   }
}

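/* Open a control-flow region.  The branch-style op goes in alu0 and its
 * forward distance isn't known yet, so the start IP is pushed on the CF
 * stack and the delta is patched into imm[0] by mme_tu104_end_cf().
 */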
static void
mme_tu104_start_cf(struct mme_builder *b,
                   enum mme_cf_type type,
                   enum mme_tu104_alu_op op,
                   struct mme_value x,
                   struct mme_value y,
                   uint16_t control)
{
   struct mme_tu104_builder *tb = &b->tu104;

   /* The HW seems to want at least LOOP to always be in alu0 */
   build_alu_to(b, mme_zero(), op, x, y, control, true);

   uint16_t ip = tb->inst_count - 1;
   assert(tb->insts[ip].alu[0].op == op);

   tb->cf_stack[tb->cf_depth++] = (struct mme_cf) {
      .type = type,
      .start_ip = ip,
   };

   /* The inside of control-flow needs to start with a new instruction */
   mme_tu104_new_inst(tb);
}

static struct mme_cf
mme_tu104_end_cf(struct mme_builder *b, enum mme_cf_type type)
{
   struct mme_tu104_builder *tb = &b->tu104;

   if (tb->inst_parts)
      mme_tu104_new_inst(tb);

   assert(tb->cf_depth > 0);
   struct mme_cf cf = tb->cf_stack[--tb->cf_depth];
   assert(cf.type == type);

   int delta = tb->inst_count - cf.start_ip - 1;
   assert(delta > 0 && delta < (1 << 13));
   tb->insts[cf.start_ip].imm[0] |= delta;

   return cf;
}

void
mme_tu104_start_loop(struct mme_builder *b, struct mme_value count)
{
   mme_tu104_start_cf(b, MME_CF_TYPE_LOOP, MME_TU104_ALU_OP_LOOP,
                      count, mme_zero(), 0);
}

void
mme_tu104_end_loop(struct mme_builder *b)
{
   mme_tu104_end_cf(b, MME_CF_TYPE_LOOP);
}

void
mme_tu104_start_if(struct mme_builder *b,
                   enum mme_cmp_op op, bool if_true,
                   struct mme_value x, struct mme_value y)
{
   uint16_t control = if_true ? 0 : BITFIELD_BIT(15);
   mme_tu104_start_cf(b, MME_CF_TYPE_IF, mme_cmp_to_tu104_branch_op(op),
                      x, y, control);
}

void
mme_tu104_end_if(struct mme_builder *b)
{
   mme_tu104_end_cf(b, MME_CF_TYPE_IF);
}

void
mme_tu104_start_while(struct mme_builder *b)
{
   mme_tu104_start_cf(b, MME_CF_TYPE_WHILE, MME_TU104_ALU_OP_JAL,
                      mme_zero(), mme_zero(), BITFIELD_BIT(15));
}

void
mme_tu104_end_while(struct mme_builder *b,
                    enum mme_cmp_op cmp,
                    bool if_true,
                    struct mme_value x,
                    struct mme_value y)
{
   struct mme_tu104_builder *tb = &b->tu104;

   struct mme_cf cf = mme_tu104_end_cf(b, MME_CF_TYPE_WHILE);

   int delta = tb->inst_count - cf.start_ip - 2;
   uint16_t control = (-delta & BITFIELD_MASK(13)) |
                      (if_true ? BITFIELD_BIT(15) : 0);
   build_alu_to(b, mme_zero(), mme_cmp_to_tu104_branch_op(cmp),
                x, y, control, true);

   /* Start a new instruction so the next thing to come along doesn't end up
    * being the 2nd half of our back-edge while.
    */
   mme_tu104_new_inst(tb);
}

void mme_tu104_exit_if(struct mme_builder *b,
                       enum mme_cmp_op op,
                       bool if_true,
                       struct mme_value x,
                       struct mme_value y)
{
   struct mme_tu104_builder *tb = &b->tu104;

   /* we reverse it as we want to take the branch if the condition is true */
   uint16_t control = if_true ? BITFIELD_BIT(15) : 0;
   /* magic offset to exit the macro */
   control |= 0x1000;
   build_alu_to(b, mme_zero(), mme_cmp_to_tu104_branch_op(op), x, y, control,
                true);

   mme_tu104_new_inst(tb);
}

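/* Finish the macro and return the encoded program (3 dwords per
 * instruction), or NULL on allocation failure.  The instruction after the
 * one flagged end_next is the last one executed, so a pair of no-ops is
 * appended and end_next set on the first of them; that guarantees the final
 * two instructions run regardless of control flow.
 */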
uint32_t *
mme_tu104_builder_finish(struct mme_tu104_builder *tb, size_t *size_out)
{
   assert(tb->cf_depth == 0);

   /* TODO: If there are at least two instructions and we can guarantee the
    * last two instructions get executed (not in control-flow), we don't need
    * to add a pair of NOPs.
    */
   mme_tu104_new_inst(tb);
   mme_tu104_new_inst(tb);
   tb->insts[tb->inst_count - 2].end_next = true;

   if (0)
      mme_tu104_print(stderr, tb->insts, tb->inst_count);

   size_t enc_size = tb->inst_count * 3 * sizeof(uint32_t);
   uint32_t *enc = malloc(enc_size);
   if (enc != NULL) {
      mme_tu104_encode(enc, tb->inst_count, tb->insts);
      *size_out = enc_size;
   }
   return enc;
}

void
mme_tu104_builder_dump(struct mme_builder *b, FILE *fp)
{
   struct mme_tu104_builder *tb = &b->tu104;

   mme_tu104_print(fp, tb->insts, tb->inst_count);
}