1 /*
2 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "ir3.h"
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <string.h>
29 #include <assert.h>
30 #include <stdbool.h>
31 #include <errno.h>
32
33 #include "util/bitscan.h"
34 #include "util/ralloc.h"
35 #include "util/u_math.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3_shader.h"
39
40 /* simple allocator to carve allocations out of an up-front allocated heap,
41 * so that we can free everything easily in one shot.
42 */
43 void * ir3_alloc(struct ir3 *shader, int sz)
44 {
45 return rzalloc_size(shader, sz); /* TODO: don't use rzalloc */
46 }
47
48 struct ir3 * ir3_create(struct ir3_compiler *compiler,
49 struct ir3_shader_variant *v)
50 {
51 struct ir3 *shader = rzalloc(v, struct ir3);
52
53 shader->compiler = compiler;
54 shader->type = v->type;
55
56 list_inithead(&shader->block_list);
57 list_inithead(&shader->array_list);
58
59 return shader;
60 }
61
62 void ir3_destroy(struct ir3 *shader)
63 {
64 ralloc_free(shader);
65 }
66
67 #define iassert(cond) do { \
68 if (!(cond)) { \
69 debug_assert(cond); \
70 return -1; \
71 } } while (0)
72
73 #define iassert_type(reg, full) do { \
74 if ((full)) { \
75 iassert(!((reg)->flags & IR3_REG_HALF)); \
76 } else { \
77 iassert((reg)->flags & IR3_REG_HALF); \
78 } } while (0)
79
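/* Encode a register operand into its hardware form, warning about flags
 * that are not in valid_flags, and updating the register/const footprint
 * (max_reg/max_half_reg/max_const) tracked in ir3_info:
 */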
80 static uint32_t reg(struct ir3_register *reg, struct ir3_info *info,
81 uint32_t repeat, uint32_t valid_flags)
82 {
83 struct ir3_shader_variant *v = info->data;
84 reg_t val = { .dummy32 = 0 };
85
86 if (reg->flags & ~valid_flags) {
87 debug_printf("INVALID FLAGS: %x vs %x\n",
88 reg->flags, valid_flags);
89 }
90
91 if (!(reg->flags & IR3_REG_R))
92 repeat = 0;
93
94 if (reg->flags & IR3_REG_IMMED) {
95 val.iim_val = reg->iim_val;
96 } else {
97 unsigned components;
98 int16_t max;
99
100 if (reg->flags & IR3_REG_RELATIV) {
101 components = reg->size;
102 val.idummy10 = reg->array.offset;
103 max = (reg->array.offset + repeat + components - 1);
104 } else {
105 components = util_last_bit(reg->wrmask);
106 val.comp = reg->num & 0x3;
107 val.num = reg->num >> 2;
108 max = (reg->num + repeat + components - 1);
109 }
110
111 if (reg->flags & IR3_REG_CONST) {
112 info->max_const = MAX2(info->max_const, max >> 2);
113 } else if (val.num == 63) {
114 /* ignore writes to dummy register r63.x */
115 } else if (max < regid(48, 0)) {
116 if (reg->flags & IR3_REG_HALF) {
117 if (v->mergedregs) {
118 /* starting w/ a6xx, half regs conflict with full regs: */
119 info->max_reg = MAX2(info->max_reg, max >> 3);
120 } else {
121 info->max_half_reg = MAX2(info->max_half_reg, max >> 2);
122 }
123 } else {
124 info->max_reg = MAX2(info->max_reg, max >> 2);
125 }
126 }
127 }
128
129 return val.dummy32;
130 }
131
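/* Category 0: flow control (nop, branch, jump, etc.): */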
132 static int emit_cat0(struct ir3_instruction *instr, void *ptr,
133 struct ir3_info *info)
134 {
135 struct ir3_shader_variant *v = info->data;
136 instr_cat0_t *cat0 = ptr;
137
138 if (v->shader->compiler->gpu_id >= 500) {
139 cat0->a5xx.immed = instr->cat0.immed;
140 } else if (v->shader->compiler->gpu_id >= 400) {
141 cat0->a4xx.immed = instr->cat0.immed;
142 } else {
143 cat0->a3xx.immed = instr->cat0.immed;
144 }
145 cat0->repeat = instr->repeat;
146 cat0->ss = !!(instr->flags & IR3_INSTR_SS);
147 cat0->inv0 = instr->cat0.inv;
148 cat0->comp0 = instr->cat0.comp;
149 cat0->opc = instr->opc;
150 cat0->opc_hi = instr->opc >= 16;
151 cat0->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
152 cat0->sync = !!(instr->flags & IR3_INSTR_SY);
153 cat0->opc_cat = 0;
154
155 return 0;
156 }
157
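/* Category 1: mov/cov (register moves and type conversions): */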
158 static int emit_cat1(struct ir3_instruction *instr, void *ptr,
159 struct ir3_info *info)
160 {
161 struct ir3_register *dst = instr->regs[0];
162 struct ir3_register *src = instr->regs[1];
163 instr_cat1_t *cat1 = ptr;
164
165 iassert(instr->regs_count == 2);
166 iassert_type(dst, type_size(instr->cat1.dst_type) == 32);
167 if (!(src->flags & IR3_REG_IMMED))
168 iassert_type(src, type_size(instr->cat1.src_type) == 32);
169
170 if (src->flags & IR3_REG_IMMED) {
171 cat1->iim_val = src->iim_val;
172 cat1->src_im = 1;
173 } else if (src->flags & IR3_REG_RELATIV) {
174 cat1->off = reg(src, info, instr->repeat,
175 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF | IR3_REG_RELATIV);
176 cat1->src_rel = 1;
177 cat1->src_rel_c = !!(src->flags & IR3_REG_CONST);
178 } else {
179 cat1->src = reg(src, info, instr->repeat,
180 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF);
181 cat1->src_c = !!(src->flags & IR3_REG_CONST);
182 }
183
184 cat1->dst = reg(dst, info, instr->repeat,
185 IR3_REG_RELATIV | IR3_REG_EVEN |
186 IR3_REG_R | IR3_REG_POS_INF | IR3_REG_HALF);
187 cat1->repeat = instr->repeat;
188 cat1->src_r = !!(src->flags & IR3_REG_R);
189 cat1->ss = !!(instr->flags & IR3_INSTR_SS);
190 cat1->ul = !!(instr->flags & IR3_INSTR_UL);
191 cat1->dst_type = instr->cat1.dst_type;
192 cat1->dst_rel = !!(dst->flags & IR3_REG_RELATIV);
193 cat1->src_type = instr->cat1.src_type;
194 cat1->even = !!(dst->flags & IR3_REG_EVEN);
195 cat1->pos_inf = !!(dst->flags & IR3_REG_POS_INF);
196 cat1->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
197 cat1->sync = !!(instr->flags & IR3_INSTR_SY);
198 cat1->opc_cat = 1;
199
200 return 0;
201 }
202
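/* Category 2: general ALU instructions with one or two sources: */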
203 static int emit_cat2(struct ir3_instruction *instr, void *ptr,
204 struct ir3_info *info)
205 {
206 struct ir3_register *dst = instr->regs[0];
207 struct ir3_register *src1 = instr->regs[1];
208 struct ir3_register *src2 = instr->regs[2];
209 instr_cat2_t *cat2 = ptr;
210 unsigned absneg = ir3_cat2_absneg(instr->opc);
211
212 iassert((instr->regs_count == 2) || (instr->regs_count == 3));
213
214 if (instr->nop) {
215 iassert(!instr->repeat);
216 iassert(instr->nop <= 3);
217
218 cat2->src1_r = instr->nop & 0x1;
219 cat2->src2_r = (instr->nop >> 1) & 0x1;
220 } else {
221 cat2->src1_r = !!(src1->flags & IR3_REG_R);
222 if (src2)
223 cat2->src2_r = !!(src2->flags & IR3_REG_R);
224 }
225
226 if (src1->flags & IR3_REG_RELATIV) {
227 iassert(src1->array.offset < (1 << 10));
228 cat2->rel1.src1 = reg(src1, info, instr->repeat,
229 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
230 IR3_REG_HALF | absneg);
231 cat2->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
232 cat2->rel1.src1_rel = 1;
233 } else if (src1->flags & IR3_REG_CONST) {
234 iassert(src1->num < (1 << 12));
235 cat2->c1.src1 = reg(src1, info, instr->repeat,
236 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
237 absneg);
238 cat2->c1.src1_c = 1;
239 } else {
240 iassert(src1->num < (1 << 11));
241 cat2->src1 = reg(src1, info, instr->repeat,
242 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
243 absneg);
244 }
245 cat2->src1_im = !!(src1->flags & IR3_REG_IMMED);
246 cat2->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
247 cat2->src1_abs = !!(src1->flags & (IR3_REG_FABS | IR3_REG_SABS));
248
249 if (src2) {
250 iassert((src2->flags & IR3_REG_IMMED) ||
251 !((src1->flags ^ src2->flags) & IR3_REG_HALF));
252
253 if (src2->flags & IR3_REG_RELATIV) {
254 iassert(src2->array.offset < (1 << 10));
255 cat2->rel2.src2 = reg(src2, info, instr->repeat,
256 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
257 IR3_REG_HALF | absneg);
258 cat2->rel2.src2_c = !!(src2->flags & IR3_REG_CONST);
259 cat2->rel2.src2_rel = 1;
260 } else if (src2->flags & IR3_REG_CONST) {
261 iassert(src2->num < (1 << 12));
262 cat2->c2.src2 = reg(src2, info, instr->repeat,
263 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
264 absneg);
265 cat2->c2.src2_c = 1;
266 } else {
267 iassert(src2->num < (1 << 11));
268 cat2->src2 = reg(src2, info, instr->repeat,
269 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
270 absneg);
271 }
272
273 cat2->src2_im = !!(src2->flags & IR3_REG_IMMED);
274 cat2->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
275 cat2->src2_abs = !!(src2->flags & (IR3_REG_FABS | IR3_REG_SABS));
276 }
277
278 cat2->dst = reg(dst, info, instr->repeat,
279 IR3_REG_R | IR3_REG_EI | IR3_REG_HALF);
280 cat2->repeat = instr->repeat;
281 cat2->sat = !!(instr->flags & IR3_INSTR_SAT);
282 cat2->ss = !!(instr->flags & IR3_INSTR_SS);
283 cat2->ul = !!(instr->flags & IR3_INSTR_UL);
284 cat2->dst_half = !!((src1->flags ^ dst->flags) & IR3_REG_HALF);
285 cat2->ei = !!(dst->flags & IR3_REG_EI);
286 cat2->cond = instr->cat2.condition;
287 cat2->full = ! (src1->flags & IR3_REG_HALF);
288 cat2->opc = instr->opc;
289 cat2->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
290 cat2->sync = !!(instr->flags & IR3_INSTR_SY);
291 cat2->opc_cat = 2;
292
293 return 0;
294 }
295
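/* Category 3: three source ALU instructions (mad, sel, sad): */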
296 static int emit_cat3(struct ir3_instruction *instr, void *ptr,
297 struct ir3_info *info)
298 {
299 struct ir3_register *dst = instr->regs[0];
300 struct ir3_register *src1 = instr->regs[1];
301 struct ir3_register *src2 = instr->regs[2];
302 struct ir3_register *src3 = instr->regs[3];
303 unsigned absneg = ir3_cat3_absneg(instr->opc);
304 instr_cat3_t *cat3 = ptr;
305 uint32_t src_flags = 0;
306
307 switch (instr->opc) {
308 case OPC_MAD_F16:
309 case OPC_MAD_U16:
310 case OPC_MAD_S16:
311 case OPC_SEL_B16:
312 case OPC_SEL_S16:
313 case OPC_SEL_F16:
314 case OPC_SAD_S16:
315 case OPC_SAD_S32: // really??
316 src_flags |= IR3_REG_HALF;
317 break;
318 default:
319 break;
320 }
321
322 iassert(instr->regs_count == 4);
323 iassert(!((src1->flags ^ src_flags) & IR3_REG_HALF));
324 iassert(!((src2->flags ^ src_flags) & IR3_REG_HALF));
325 iassert(!((src3->flags ^ src_flags) & IR3_REG_HALF));
326
327 if (instr->nop) {
328 iassert(!instr->repeat);
329 iassert(instr->nop <= 3);
330
331 cat3->src1_r = instr->nop & 0x1;
332 cat3->src2_r = (instr->nop >> 1) & 0x1;
333 } else {
334 cat3->src1_r = !!(src1->flags & IR3_REG_R);
335 cat3->src2_r = !!(src2->flags & IR3_REG_R);
336 }
337
338 if (src1->flags & IR3_REG_RELATIV) {
339 iassert(src1->array.offset < (1 << 10));
340 cat3->rel1.src1 = reg(src1, info, instr->repeat,
341 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
342 IR3_REG_HALF | absneg);
343 cat3->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
344 cat3->rel1.src1_rel = 1;
345 } else if (src1->flags & IR3_REG_CONST) {
346 iassert(src1->num < (1 << 12));
347 cat3->c1.src1 = reg(src1, info, instr->repeat,
348 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
349 cat3->c1.src1_c = 1;
350 } else {
351 iassert(src1->num < (1 << 11));
352 cat3->src1 = reg(src1, info, instr->repeat,
353 IR3_REG_R | IR3_REG_HALF | absneg);
354 }
355
356 cat3->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
357
358 cat3->src2 = reg(src2, info, instr->repeat,
359 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
360 cat3->src2_c = !!(src2->flags & IR3_REG_CONST);
361 cat3->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
362
363 if (src3->flags & IR3_REG_RELATIV) {
364 iassert(src3->array.offset < (1 << 10));
365 cat3->rel2.src3 = reg(src3, info, instr->repeat,
366 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
367 IR3_REG_HALF | absneg);
368 cat3->rel2.src3_c = !!(src3->flags & IR3_REG_CONST);
369 cat3->rel2.src3_rel = 1;
370 } else if (src3->flags & IR3_REG_CONST) {
371 iassert(src3->num < (1 << 12));
372 cat3->c2.src3 = reg(src3, info, instr->repeat,
373 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
374 cat3->c2.src3_c = 1;
375 } else {
376 iassert(src3->num < (1 << 11));
377 cat3->src3 = reg(src3, info, instr->repeat,
378 IR3_REG_R | IR3_REG_HALF | absneg);
379 }
380
381 cat3->src3_neg = !!(src3->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
382 cat3->src3_r = !!(src3->flags & IR3_REG_R);
383
384 cat3->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
385 cat3->repeat = instr->repeat;
386 cat3->sat = !!(instr->flags & IR3_INSTR_SAT);
387 cat3->ss = !!(instr->flags & IR3_INSTR_SS);
388 cat3->ul = !!(instr->flags & IR3_INSTR_UL);
389 cat3->dst_half = !!((src_flags ^ dst->flags) & IR3_REG_HALF);
390 cat3->opc = instr->opc;
391 cat3->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
392 cat3->sync = !!(instr->flags & IR3_INSTR_SY);
393 cat3->opc_cat = 3;
394
395 return 0;
396 }
397
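/* Category 4: single source SFU (transcendental) instructions: */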
398 static int emit_cat4(struct ir3_instruction *instr, void *ptr,
399 struct ir3_info *info)
400 {
401 struct ir3_register *dst = instr->regs[0];
402 struct ir3_register *src = instr->regs[1];
403 instr_cat4_t *cat4 = ptr;
404
405 iassert(instr->regs_count == 2);
406
407 if (src->flags & IR3_REG_RELATIV) {
408 iassert(src->array.offset < (1 << 10));
409 cat4->rel.src = reg(src, info, instr->repeat,
410 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_FNEG |
411 IR3_REG_FABS | IR3_REG_R | IR3_REG_HALF);
412 cat4->rel.src_c = !!(src->flags & IR3_REG_CONST);
413 cat4->rel.src_rel = 1;
414 } else if (src->flags & IR3_REG_CONST) {
415 iassert(src->num < (1 << 12));
416 cat4->c.src = reg(src, info, instr->repeat,
417 IR3_REG_CONST | IR3_REG_FNEG | IR3_REG_FABS |
418 IR3_REG_R | IR3_REG_HALF);
419 cat4->c.src_c = 1;
420 } else {
421 iassert(src->num < (1 << 11));
422 cat4->src = reg(src, info, instr->repeat,
423 IR3_REG_IMMED | IR3_REG_FNEG | IR3_REG_FABS |
424 IR3_REG_R | IR3_REG_HALF);
425 }
426
427 cat4->src_im = !!(src->flags & IR3_REG_IMMED);
428 cat4->src_neg = !!(src->flags & IR3_REG_FNEG);
429 cat4->src_abs = !!(src->flags & IR3_REG_FABS);
430 cat4->src_r = !!(src->flags & IR3_REG_R);
431
432 cat4->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
433 cat4->repeat = instr->repeat;
434 cat4->sat = !!(instr->flags & IR3_INSTR_SAT);
435 cat4->ss = !!(instr->flags & IR3_INSTR_SS);
436 cat4->ul = !!(instr->flags & IR3_INSTR_UL);
437 cat4->dst_half = !!((src->flags ^ dst->flags) & IR3_REG_HALF);
438 cat4->full = ! (src->flags & IR3_REG_HALF);
439 cat4->opc = instr->opc;
440 cat4->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
441 cat4->sync = !!(instr->flags & IR3_INSTR_SY);
442 cat4->opc_cat = 4;
443
444 return 0;
445 }
446
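/* Category 5: texture sample/fetch instructions: */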
447 static int emit_cat5(struct ir3_instruction *instr, void *ptr,
448 struct ir3_info *info)
449 {
450 struct ir3_register *dst = instr->regs[0];
451 /* To simplify things when there could be zero, one, or two args other
452 * than tex/sampler idx, we use the first src reg in the ir to hold
453 * samp_tex hvec2:
454 */
455 struct ir3_register *src1;
456 struct ir3_register *src2;
457 instr_cat5_t *cat5 = ptr;
458
459 iassert((instr->regs_count == 1) ||
460 (instr->regs_count == 2) ||
461 (instr->regs_count == 3) ||
462 (instr->regs_count == 4));
463
464 if (instr->flags & IR3_INSTR_S2EN) {
465 src1 = instr->regs[2];
466 src2 = instr->regs_count > 3 ? instr->regs[3] : NULL;
467 } else {
468 src1 = instr->regs_count > 1 ? instr->regs[1] : NULL;
469 src2 = instr->regs_count > 2 ? instr->regs[2] : NULL;
470 }
471
472 assume(src1 || !src2);
473
474 if (src1) {
475 cat5->full = ! (src1->flags & IR3_REG_HALF);
476 cat5->src1 = reg(src1, info, instr->repeat, IR3_REG_HALF);
477 }
478
479 if (src2) {
480 iassert(!((src1->flags ^ src2->flags) & IR3_REG_HALF));
481 cat5->src2 = reg(src2, info, instr->repeat, IR3_REG_HALF);
482 }
483
484 if (instr->flags & IR3_INSTR_B) {
485 cat5->s2en_bindless.base_hi = instr->cat5.tex_base >> 1;
486 cat5->base_lo = instr->cat5.tex_base & 1;
487 }
488
489 if (instr->flags & IR3_INSTR_S2EN) {
490 struct ir3_register *samp_tex = instr->regs[1];
491 cat5->s2en_bindless.src3 = reg(samp_tex, info, instr->repeat,
492 (instr->flags & IR3_INSTR_B) ? 0 : IR3_REG_HALF);
493 if (instr->flags & IR3_INSTR_B) {
494 if (instr->flags & IR3_INSTR_A1EN) {
495 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_UNIFORM;
496 } else {
497 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_UNIFORM;
498 }
499 } else {
500 /* TODO: This should probably be CAT5_UNIFORM, at least on a6xx,
501 * as this is what the blob does and it is presumably faster, but
502 * first we should confirm it is actually nonuniform and figure
503 * out when the whole descriptor mode mechanism was introduced.
504 */
505 cat5->s2en_bindless.desc_mode = CAT5_NONUNIFORM;
506 }
507 iassert(!(instr->cat5.samp | instr->cat5.tex));
508 } else if (instr->flags & IR3_INSTR_B) {
509 cat5->s2en_bindless.src3 = instr->cat5.samp;
510 if (instr->flags & IR3_INSTR_A1EN) {
511 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_IMM;
512 } else {
513 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_IMM;
514 }
515 } else {
516 cat5->norm.samp = instr->cat5.samp;
517 cat5->norm.tex = instr->cat5.tex;
518 }
519
520 cat5->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
521 cat5->wrmask = dst->wrmask;
522 cat5->type = instr->cat5.type;
523 cat5->is_3d = !!(instr->flags & IR3_INSTR_3D);
524 cat5->is_a = !!(instr->flags & IR3_INSTR_A);
525 cat5->is_s = !!(instr->flags & IR3_INSTR_S);
526 cat5->is_s2en_bindless = !!(instr->flags & (IR3_INSTR_S2EN | IR3_INSTR_B));
527 cat5->is_o = !!(instr->flags & IR3_INSTR_O);
528 cat5->is_p = !!(instr->flags & IR3_INSTR_P);
529 cat5->opc = instr->opc;
530 cat5->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
531 cat5->sync = !!(instr->flags & IR3_INSTR_SY);
532 cat5->opc_cat = 5;
533
534 return 0;
535 }
536
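/* a6xx+ encoding used for the SSBO/image/ldc variants of category 6: */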
537 static int emit_cat6_a6xx(struct ir3_instruction *instr, void *ptr,
538 struct ir3_info *info)
539 {
540 struct ir3_register *ssbo;
541 instr_cat6_a6xx_t *cat6 = ptr;
542
543 ssbo = instr->regs[1];
544
545 cat6->type = instr->cat6.type;
546 cat6->d = instr->cat6.d - (instr->opc == OPC_LDC ? 0 : 1);
547 cat6->typed = instr->cat6.typed;
548 cat6->type_size = instr->cat6.iim_val - 1;
549 cat6->opc = instr->opc;
550 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
551 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
552 cat6->opc_cat = 6;
553
554 cat6->ssbo = reg(ssbo, info, instr->repeat, IR3_REG_IMMED);
555
556 /* For unused sources in an opcode, initialize contents with the ir3 dest
557 * reg
558 */
559 switch (instr->opc) {
560 case OPC_RESINFO:
561 cat6->src1 = reg(instr->regs[0], info, instr->repeat, 0);
562 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
563 break;
564 case OPC_LDC:
565 case OPC_LDIB:
566 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
567 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
568 break;
569 default:
570 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
571 cat6->src2 = reg(instr->regs[3], info, instr->repeat, 0);
572 break;
573 }
574
575 if (instr->flags & IR3_INSTR_B) {
576 if (ssbo->flags & IR3_REG_IMMED) {
577 cat6->desc_mode = CAT6_BINDLESS_IMM;
578 } else {
579 cat6->desc_mode = CAT6_BINDLESS_UNIFORM;
580 }
581 cat6->base = instr->cat6.base;
582 } else {
583 if (ssbo->flags & IR3_REG_IMMED)
584 cat6->desc_mode = CAT6_IMM;
585 else
586 cat6->desc_mode = CAT6_UNIFORM;
587 }
588
589 switch (instr->opc) {
590 case OPC_ATOMIC_ADD:
591 case OPC_ATOMIC_SUB:
592 case OPC_ATOMIC_XCHG:
593 case OPC_ATOMIC_INC:
594 case OPC_ATOMIC_DEC:
595 case OPC_ATOMIC_CMPXCHG:
596 case OPC_ATOMIC_MIN:
597 case OPC_ATOMIC_MAX:
598 case OPC_ATOMIC_AND:
599 case OPC_ATOMIC_OR:
600 case OPC_ATOMIC_XOR:
601 cat6->pad1 = 0x1;
602 cat6->pad3 = 0xc;
603 cat6->pad5 = 0x3;
604 break;
605 case OPC_STIB:
606 cat6->pad1 = 0x0;
607 cat6->pad3 = 0xc;
608 cat6->pad5 = 0x2;
609 break;
610 case OPC_LDIB:
611 case OPC_RESINFO:
612 cat6->pad1 = 0x1;
613 cat6->pad3 = 0xc;
614 cat6->pad5 = 0x2;
615 break;
616 case OPC_LDC:
617 cat6->pad1 = 0x0;
618 cat6->pad3 = 0x8;
619 cat6->pad5 = 0x2;
620 break;
621 default:
622 iassert(0);
623 }
624 cat6->pad2 = 0x0;
625 cat6->pad4 = 0x0;
626
627 return 0;
628 }
629
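/* Category 6: memory access (global/local/shared loads, stores, atomics): */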
630 static int emit_cat6(struct ir3_instruction *instr, void *ptr,
631 struct ir3_info *info)
632 {
633 struct ir3_shader_variant *v = info->data;
634 struct ir3_register *dst, *src1, *src2;
635 instr_cat6_t *cat6 = ptr;
636
637 /* In a6xx we start using a new instruction encoding for some of
638 * these instructions:
639 */
640 if (v->shader->compiler->gpu_id >= 600) {
641 switch (instr->opc) {
642 case OPC_ATOMIC_ADD:
643 case OPC_ATOMIC_SUB:
644 case OPC_ATOMIC_XCHG:
645 case OPC_ATOMIC_INC:
646 case OPC_ATOMIC_DEC:
647 case OPC_ATOMIC_CMPXCHG:
648 case OPC_ATOMIC_MIN:
649 case OPC_ATOMIC_MAX:
650 case OPC_ATOMIC_AND:
651 case OPC_ATOMIC_OR:
652 case OPC_ATOMIC_XOR:
653 /* The shared variants of these still use the old encoding: */
654 if (!(instr->flags & IR3_INSTR_G))
655 break;
656 /* fallthrough */
657 case OPC_STIB:
658 case OPC_LDIB:
659 case OPC_LDC:
660 case OPC_RESINFO:
661 return emit_cat6_a6xx(instr, ptr, info);
662 default:
663 break;
664 }
665 }
666
667 bool type_full = type_size(instr->cat6.type) == 32;
668
669 cat6->type = instr->cat6.type;
670 cat6->opc = instr->opc;
671 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
672 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
673 cat6->g = !!(instr->flags & IR3_INSTR_G);
674 cat6->opc_cat = 6;
675
676 switch (instr->opc) {
677 case OPC_RESINFO:
678 case OPC_RESFMT:
679 iassert_type(instr->regs[0], type_full); /* dst */
680 iassert_type(instr->regs[1], type_full); /* src1 */
681 break;
682 case OPC_L2G:
683 case OPC_G2L:
684 iassert_type(instr->regs[0], true); /* dst */
685 iassert_type(instr->regs[1], true); /* src1 */
686 break;
687 case OPC_STG:
688 case OPC_STL:
689 case OPC_STP:
690 case OPC_STLW:
691 case OPC_STIB:
692 /* no dst, so regs[0] is dummy */
693 iassert_type(instr->regs[1], true); /* dst */
694 iassert_type(instr->regs[2], type_full); /* src1 */
695 iassert_type(instr->regs[3], true); /* src2 */
696 break;
697 default:
698 iassert_type(instr->regs[0], type_full); /* dst */
699 iassert_type(instr->regs[1], true); /* src1 */
700 if (instr->regs_count > 2)
701 iassert_type(instr->regs[2], true); /* src2 */
702 break;
703 }
704
705 /* the "dst" for a store instruction is (from the perspective
706 * of data flow in the shader, ie. register use/def, etc) in
707 * fact a register that is read by the instruction, rather
708 * than written:
709 */
710 if (is_store(instr)) {
711 iassert(instr->regs_count >= 3);
712
713 dst = instr->regs[1];
714 src1 = instr->regs[2];
715 src2 = (instr->regs_count >= 4) ? instr->regs[3] : NULL;
716 } else {
717 iassert(instr->regs_count >= 2);
718
719 dst = instr->regs[0];
720 src1 = instr->regs[1];
721 src2 = (instr->regs_count >= 3) ? instr->regs[2] : NULL;
722 }
723
724 /* TODO we need a more comprehensive list about which instructions
725 * can be encoded which way. Or possibly use IR3_INSTR_0 flag to
726 * indicate to use the src_off encoding even if offset is zero
727 * (but then what to do about dst_off?)
728 */
729 if (is_atomic(instr->opc)) {
730 instr_cat6ldgb_t *ldgb = ptr;
731
732 /* maybe these two bits both determine the instruction encoding? */
733 cat6->src_off = false;
734
735 ldgb->d = instr->cat6.d - 1;
736 ldgb->typed = instr->cat6.typed;
737 ldgb->type_size = instr->cat6.iim_val - 1;
738
739 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
740
741 if (ldgb->g) {
742 struct ir3_register *src3 = instr->regs[3];
743 struct ir3_register *src4 = instr->regs[4];
744
745 /* first src is src_ssbo: */
746 iassert(src1->flags & IR3_REG_IMMED);
747 ldgb->src_ssbo = src1->uim_val;
748 ldgb->src_ssbo_im = 0x1;
749
750 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
751 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
752 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
753 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
754
755 ldgb->src3 = reg(src4, info, instr->repeat, 0);
756 ldgb->pad0 = 0x1;
757 } else {
758 ldgb->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
759 ldgb->src1_im = !!(src1->flags & IR3_REG_IMMED);
760 ldgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
761 ldgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
762 ldgb->pad0 = 0x1;
763 ldgb->src_ssbo_im = 0x0;
764 }
765
766 return 0;
767 } else if (instr->opc == OPC_LDGB) {
768 struct ir3_register *src3 = instr->regs[3];
769 instr_cat6ldgb_t *ldgb = ptr;
770
771 /* maybe these two bits both determine the instruction encoding? */
772 cat6->src_off = false;
773
774 ldgb->d = instr->cat6.d - 1;
775 ldgb->typed = instr->cat6.typed;
776 ldgb->type_size = instr->cat6.iim_val - 1;
777
778 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
779
780 /* first src is src_ssbo: */
781 iassert(src1->flags & IR3_REG_IMMED);
782 ldgb->src_ssbo = src1->uim_val;
783
784 /* then next two are src1/src2: */
785 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
786 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
787 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
788 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
789
790 ldgb->pad0 = 0x0;
791 ldgb->src_ssbo_im = true;
792
793 return 0;
794 } else if (instr->opc == OPC_RESINFO) {
795 instr_cat6ldgb_t *ldgb = ptr;
796
797 ldgb->d = instr->cat6.d - 1;
798
799 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
800
801 /* first src is src_ssbo: */
802 ldgb->src_ssbo = reg(src1, info, instr->repeat, IR3_REG_IMMED);
803 ldgb->src_ssbo_im = !!(src1->flags & IR3_REG_IMMED);
804
805 return 0;
806 } else if ((instr->opc == OPC_STGB) || (instr->opc == OPC_STIB)) {
807 struct ir3_register *src3 = instr->regs[4];
808 instr_cat6stgb_t *stgb = ptr;
809
810 /* maybe these two bits both determine the instruction encoding? */
811 cat6->src_off = true;
812 stgb->pad3 = 0x2;
813
814 stgb->d = instr->cat6.d - 1;
815 stgb->typed = instr->cat6.typed;
816 stgb->type_size = instr->cat6.iim_val - 1;
817
818 /* first src is dst_ssbo: */
819 iassert(dst->flags & IR3_REG_IMMED);
820 stgb->dst_ssbo = dst->uim_val;
821
822 /* then src1/src2/src3: */
823 stgb->src1 = reg(src1, info, instr->repeat, 0);
824 stgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
825 stgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
826 stgb->src3 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
827 stgb->src3_im = !!(src3->flags & IR3_REG_IMMED);
828
829 return 0;
830 } else if (instr->cat6.src_offset || (instr->opc == OPC_LDG) ||
831 (instr->opc == OPC_LDL) || (instr->opc == OPC_LDLW)) {
832 struct ir3_register *src3 = instr->regs[3];
833 instr_cat6a_t *cat6a = ptr;
834
835 cat6->src_off = true;
836
837 if (instr->opc == OPC_LDG) {
838 /* For LDG src1 can not be immediate, so src1_imm is redundant and
839 * instead used to signal whether (when true) 'off' is a 32 bit
840 * register or an immediate offset.
841 */
842 cat6a->src1 = reg(src1, info, instr->repeat, 0);
843 cat6a->src1_im = !(src3->flags & IR3_REG_IMMED);
844 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
845 } else {
846 cat6a->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
847 cat6a->src1_im = !!(src1->flags & IR3_REG_IMMED);
848 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
849 iassert(src3->flags & IR3_REG_IMMED);
850 }
851
852 /* Num components */
853 cat6a->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
854 cat6a->src2_im = true;
855 } else {
856 instr_cat6b_t *cat6b = ptr;
857
858 cat6->src_off = false;
859
860 cat6b->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED | IR3_REG_HALF);
861 cat6b->src1_im = !!(src1->flags & IR3_REG_IMMED);
862 if (src2) {
863 cat6b->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
864 cat6b->src2_im = !!(src2->flags & IR3_REG_IMMED);
865 }
866 }
867
868 if (instr->cat6.dst_offset || (instr->opc == OPC_STG) ||
869 (instr->opc == OPC_STL) || (instr->opc == OPC_STLW)) {
870 instr_cat6c_t *cat6c = ptr;
871 cat6->dst_off = true;
872 cat6c->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
873
874 if (instr->flags & IR3_INSTR_G) {
875 struct ir3_register *src3 = instr->regs[4];
876 cat6c->off = reg(src3, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
877 if (src3->flags & IR3_REG_IMMED) {
878 /* Immediate offsets are in bytes... */
879 cat6->g = false;
880 cat6c->off *= 4;
881 }
882 } else {
883 cat6c->off = instr->cat6.dst_offset;
884 cat6c->off_high = instr->cat6.dst_offset >> 8;
885 }
886 } else {
887 instr_cat6d_t *cat6d = ptr;
888 cat6->dst_off = false;
889 cat6d->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
890 }
891
892 return 0;
893 }
894
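/* Category 7: barriers and fences: */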
895 static int emit_cat7(struct ir3_instruction *instr, void *ptr,
896 struct ir3_info *info)
897 {
898 instr_cat7_t *cat7 = ptr;
899
900 cat7->ss = !!(instr->flags & IR3_INSTR_SS);
901 cat7->w = instr->cat7.w;
902 cat7->r = instr->cat7.r;
903 cat7->l = instr->cat7.l;
904 cat7->g = instr->cat7.g;
905 cat7->opc = instr->opc;
906 cat7->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
907 cat7->sync = !!(instr->flags & IR3_INSTR_SY);
908 cat7->opc_cat = 7;
909
910 return 0;
911 }
912
913 static int (*emit[])(struct ir3_instruction *instr, void *ptr,
914 struct ir3_info *info) = {
915 emit_cat0, emit_cat1, emit_cat2, emit_cat3, emit_cat4, emit_cat5, emit_cat6,
916 emit_cat7,
917 };
918
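/* Assemble the shader variant: encode every instruction in block order into
 * a newly allocated buffer (padded out to instr_align), while accumulating
 * statistics (per-category instruction counts, nop/mov/cov counts, (ss)/(sy)
 * counts) into v->info. Returns the buffer, or NULL if encoding fails.
 */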
919 void * ir3_assemble(struct ir3_shader_variant *v)
920 {
921 uint32_t *ptr, *dwords;
922 struct ir3_info *info = &v->info;
923 struct ir3 *shader = v->ir;
924 const struct ir3_compiler *compiler = v->shader->compiler;
925
926 memset(info, 0, sizeof(*info));
927 info->data = v;
928 info->max_reg = -1;
929 info->max_half_reg = -1;
930 info->max_const = -1;
931
932 uint32_t instr_count = 0;
933 foreach_block (block, &shader->block_list) {
934 foreach_instr (instr, &block->instr_list) {
935 instr_count++;
936 }
937 }
938
939 v->instrlen = DIV_ROUND_UP(instr_count, compiler->instr_align);
940
941 /* Pad out with NOPs to instrlen. */
942 info->sizedwords = v->instrlen * compiler->instr_align * sizeof(instr_t) / 4;
943
944 ptr = dwords = rzalloc_size(v, 4 * info->sizedwords);
945
946 foreach_block (block, &shader->block_list) {
947 unsigned sfu_delay = 0;
948
949 foreach_instr (instr, &block->instr_list) {
950 int ret = emit[opc_cat(instr->opc)](instr, dwords, info);
951 if (ret)
952 goto fail;
953
954 if ((instr->opc == OPC_BARY_F) && (instr->regs[0]->flags & IR3_REG_EI))
955 info->last_baryf = info->instrs_count;
956
957 unsigned instrs_count = 1 + instr->repeat + instr->nop;
958 unsigned nops_count = instr->nop;
959
960 if (instr->opc == OPC_NOP) {
961 nops_count = 1 + instr->repeat;
962 info->instrs_per_cat[0] += nops_count;
963 } else {
964 info->instrs_per_cat[opc_cat(instr->opc)] += instrs_count;
965 info->instrs_per_cat[0] += nops_count;
966 }
967
968 if (instr->opc == OPC_MOV) {
969 if (instr->cat1.src_type == instr->cat1.dst_type) {
970 info->mov_count += 1 + instr->repeat;
971 } else {
972 info->cov_count += 1 + instr->repeat;
973 }
974 }
975
976 info->instrs_count += instrs_count;
977 info->nops_count += nops_count;
978
979 dwords += 2;
980
981 if (instr->flags & IR3_INSTR_SS) {
982 info->ss++;
983 info->sstall += sfu_delay;
984 }
985
986 if (instr->flags & IR3_INSTR_SY)
987 info->sy++;
988
989 if (is_sfu(instr)) {
990 sfu_delay = 10;
991 } else if (sfu_delay > 0) {
992 sfu_delay--;
993 }
994 }
995 }
996
997 return ptr;
998
999 fail:
1000 ralloc_free(ptr);
1001 return NULL;
1002 }
1003
1004 static struct ir3_register * reg_create(struct ir3 *shader,
1005 int num, int flags)
1006 {
1007 struct ir3_register *reg =
1008 ir3_alloc(shader, sizeof(struct ir3_register));
1009 reg->wrmask = 1;
1010 reg->flags = flags;
1011 reg->num = num;
1012 return reg;
1013 }
1014
1015 static void insert_instr(struct ir3_block *block,
1016 struct ir3_instruction *instr)
1017 {
1018 struct ir3 *shader = block->shader;
1019 #ifdef DEBUG
1020 instr->serialno = ++shader->instr_count;
1021 #endif
1022 list_addtail(&instr->node, &block->instr_list);
1023
1024 if (is_input(instr))
1025 array_insert(shader, shader->baryfs, instr);
1026 }
1027
1028 struct ir3_block * ir3_block_create(struct ir3 *shader)
1029 {
1030 struct ir3_block *block = ir3_alloc(shader, sizeof(*block));
1031 #ifdef DEBUG
1032 block->serialno = ++shader->block_count;
1033 #endif
1034 block->shader = shader;
1035 list_inithead(&block->node);
1036 list_inithead(&block->instr_list);
1037 block->predecessors = _mesa_pointer_set_create(block);
1038 return block;
1039 }
1040
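/* Allocate an instruction with room for 'nreg' register pointers; the
 * regs[] array is carved out of the same allocation, directly after the
 * instruction itself:
 */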
1041 static struct ir3_instruction *instr_create(struct ir3_block *block, int nreg)
1042 {
1043 struct ir3_instruction *instr;
1044 unsigned sz = sizeof(*instr) + (nreg * sizeof(instr->regs[0]));
1045 char *ptr = ir3_alloc(block->shader, sz);
1046
1047 instr = (struct ir3_instruction *)ptr;
1048 ptr += sizeof(*instr);
1049 instr->regs = (struct ir3_register **)ptr;
1050
1051 #ifdef DEBUG
1052 instr->regs_max = nreg;
1053 #endif
1054
1055 return instr;
1056 }
1057
1058 struct ir3_instruction * ir3_instr_create2(struct ir3_block *block,
1059 opc_t opc, int nreg)
1060 {
1061 struct ir3_instruction *instr = instr_create(block, nreg);
1062 instr->block = block;
1063 instr->opc = opc;
1064 insert_instr(block, instr);
1065 return instr;
1066 }
1067
1068 struct ir3_instruction * ir3_instr_create(struct ir3_block *block, opc_t opc)
1069 {
1070 /* NOTE: we could be slightly more clever, at least for non-meta,
1071 * and choose # of regs based on category.
1072 */
1073 return ir3_instr_create2(block, opc, 4);
1074 }
1075
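/* Make a copy of an instruction, including clones of its registers, and
 * append it to the same block as the original:
 */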
1076 struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr)
1077 {
1078 struct ir3_instruction *new_instr = instr_create(instr->block,
1079 instr->regs_count);
1080 struct ir3_register **regs;
1081 unsigned i;
1082
1083 regs = new_instr->regs;
1084 *new_instr = *instr;
1085 new_instr->regs = regs;
1086
1087 insert_instr(instr->block, new_instr);
1088
1089 /* clone registers: */
1090 new_instr->regs_count = 0;
1091 for (i = 0; i < instr->regs_count; i++) {
1092 struct ir3_register *reg = instr->regs[i];
1093 struct ir3_register *new_reg =
1094 ir3_reg_create(new_instr, reg->num, reg->flags);
1095 *new_reg = *reg;
1096 }
1097
1098 return new_instr;
1099 }
1100
1101 /* Add a false dependency to instruction, to ensure it is scheduled first: */
1102 void ir3_instr_add_dep(struct ir3_instruction *instr, struct ir3_instruction *dep)
1103 {
1104 array_insert(instr, instr->deps, dep);
1105 }
1106
1107 struct ir3_register * ir3_reg_create(struct ir3_instruction *instr,
1108 int num, int flags)
1109 {
1110 struct ir3 *shader = instr->block->shader;
1111 struct ir3_register *reg = reg_create(shader, num, flags);
1112 #ifdef DEBUG
1113 debug_assert(instr->regs_count < instr->regs_max);
1114 #endif
1115 instr->regs[instr->regs_count++] = reg;
1116 return reg;
1117 }
1118
1119 struct ir3_register * ir3_reg_clone(struct ir3 *shader,
1120 struct ir3_register *reg)
1121 {
1122 struct ir3_register *new_reg = reg_create(shader, 0, 0);
1123 *new_reg = *reg;
1124 return new_reg;
1125 }
1126
1127 void
1128 ir3_instr_set_address(struct ir3_instruction *instr,
1129 struct ir3_instruction *addr)
1130 {
1131 if (instr->address != addr) {
1132 struct ir3 *ir = instr->block->shader;
1133
1134 debug_assert(!instr->address);
1135 debug_assert(instr->block == addr->block);
1136
1137 instr->address = addr;
1138 debug_assert(reg_num(addr->regs[0]) == REG_A0);
1139 unsigned comp = reg_comp(addr->regs[0]);
1140 if (comp == 0) {
1141 array_insert(ir, ir->a0_users, instr);
1142 } else {
1143 debug_assert(comp == 1);
1144 array_insert(ir, ir->a1_users, instr);
1145 }
1146 }
1147 }
1148
1149 void
1150 ir3_block_clear_mark(struct ir3_block *block)
1151 {
1152 foreach_instr (instr, &block->instr_list)
1153 instr->flags &= ~IR3_INSTR_MARK;
1154 }
1155
1156 void
1157 ir3_clear_mark(struct ir3 *ir)
1158 {
1159 foreach_block (block, &ir->block_list) {
1160 ir3_block_clear_mark(block);
1161 }
1162 }
1163
1164 unsigned
1165 ir3_count_instructions(struct ir3 *ir)
1166 {
1167 unsigned cnt = 1;
1168 foreach_block (block, &ir->block_list) {
1169 block->start_ip = cnt;
1170 foreach_instr (instr, &block->instr_list) {
1171 instr->ip = cnt++;
1172 }
1173 block->end_ip = cnt;
1174 }
1175 return cnt;
1176 }
1177
1178 /* When counting instructions for RA, we insert extra fake instructions at the
1179 * beginning of each block, where values become live, and at the end where
1180 * values die. This prevents problems where values live-in at the beginning or
1181 * live-out at the end of a block from being treated as if they were
1182 * live-in/live-out at the first/last instruction, which would be incorrect.
1183 * In ir3_legalize these ip's are assumed to be actual ip's of the final
1184 * program, so it would be incorrect to use this everywhere.
1185 */
1186
1187 unsigned
1188 ir3_count_instructions_ra(struct ir3 *ir)
1189 {
1190 unsigned cnt = 1;
1191 foreach_block (block, &ir->block_list) {
1192 block->start_ip = cnt++;
1193 foreach_instr (instr, &block->instr_list) {
1194 instr->ip = cnt++;
1195 }
1196 block->end_ip = cnt++;
1197 }
1198 return cnt;
1199 }
1200
1201 struct ir3_array *
1202 ir3_lookup_array(struct ir3 *ir, unsigned id)
1203 {
1204 foreach_array (arr, &ir->array_list)
1205 if (arr->id == id)
1206 return arr;
1207 return NULL;
1208 }
1209
1210 void
1211 ir3_find_ssa_uses(struct ir3 *ir, void *mem_ctx, bool falsedeps)
1212 {
1213 /* We could do this in a single pass if we can assume instructions
1214 * are always sorted. Which currently might not always be true.
1215 * (In particular after ir3_group pass, but maybe other places.)
1216 */
1217 foreach_block (block, &ir->block_list)
1218 foreach_instr (instr, &block->instr_list)
1219 instr->uses = NULL;
1220
1221 foreach_block (block, &ir->block_list) {
1222 foreach_instr (instr, &block->instr_list) {
1223 foreach_ssa_src_n (src, n, instr) {
1224 if (__is_false_dep(instr, n) && !falsedeps)
1225 continue;
1226 if (!src->uses)
1227 src->uses = _mesa_pointer_set_create(mem_ctx);
1228 _mesa_set_add(src->uses, instr);
1229 }
1230 }
1231 }
1232 }
1233
1234 /**
1235 * Set the destination type of an instruction, for example if a
1236 * conversion is folded in, handling the special cases where the
1237 * instruction's dest type or opcode needs to be fixed up.
1238 */
1239 void
1240 ir3_set_dst_type(struct ir3_instruction *instr, bool half)
1241 {
1242 if (half) {
1243 instr->regs[0]->flags |= IR3_REG_HALF;
1244 } else {
1245 instr->regs[0]->flags &= ~IR3_REG_HALF;
1246 }
1247
1248 switch (opc_cat(instr->opc)) {
1249 case 1: /* move instructions */
1250 if (half) {
1251 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
1252 } else {
1253 instr->cat1.dst_type = full_type(instr->cat1.dst_type);
1254 }
1255 break;
1256 case 4:
1257 if (half) {
1258 instr->opc = cat4_half_opc(instr->opc);
1259 } else {
1260 instr->opc = cat4_full_opc(instr->opc);
1261 }
1262 break;
1263 case 5:
1264 if (half) {
1265 instr->cat5.type = half_type(instr->cat5.type);
1266 } else {
1267 instr->cat5.type = full_type(instr->cat5.type);
1268 }
1269 break;
1270 }
1271 }
1272
1273 /**
1274 * One-time fixup for instruction src-types. Other than cov's that
1275 * are folded, an instruction's src type does not change.
1276 */
1277 void
1278 ir3_fixup_src_type(struct ir3_instruction *instr)
1279 {
1280 bool half = !!(instr->regs[1]->flags & IR3_REG_HALF);
1281
1282 switch (opc_cat(instr->opc)) {
1283 case 1: /* move instructions */
1284 if (half) {
1285 instr->cat1.src_type = half_type(instr->cat1.src_type);
1286 } else {
1287 instr->cat1.src_type = full_type(instr->cat1.src_type);
1288 }
1289 break;
1290 case 3:
1291 if (half) {
1292 instr->opc = cat3_half_opc(instr->opc);
1293 } else {
1294 instr->opc = cat3_full_opc(instr->opc);
1295 }
1296 break;
1297 }
1298 }
1299
1300 static unsigned
1301 cp_flags(unsigned flags)
1302 {
1303 /* only considering these flags (at least for now): */
1304 flags &= (IR3_REG_CONST | IR3_REG_IMMED |
1305 IR3_REG_FNEG | IR3_REG_FABS |
1306 IR3_REG_SNEG | IR3_REG_SABS |
1307 IR3_REG_BNOT | IR3_REG_RELATIV);
1308 return flags;
1309 }
1310
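/* Check whether the given source flags are allowed for the n'th source of
 * the instruction, according to the per-category restrictions below:
 */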
1311 bool
1312 ir3_valid_flags(struct ir3_instruction *instr, unsigned n,
1313 unsigned flags)
1314 {
1315 struct ir3_compiler *compiler = instr->block->shader->compiler;
1316 unsigned valid_flags;
1317
1318 if ((flags & IR3_REG_HIGH) &&
1319 (opc_cat(instr->opc) > 1) &&
1320 (compiler->gpu_id >= 600))
1321 return false;
1322
1323 flags = cp_flags(flags);
1324
1325 /* If destination is indirect, then source cannot be.. at least
1326 * I don't think so..
1327 */
1328 if ((instr->regs[0]->flags & IR3_REG_RELATIV) &&
1329 (flags & IR3_REG_RELATIV))
1330 return false;
1331
1332 if (flags & IR3_REG_RELATIV) {
1333 /* TODO need to test on earlier gens.. pretty sure the earlier
1334 * problem was just that we didn't check that the src was from
1335 * same block (since we can't propagate address register values
1336 * across blocks currently)
1337 */
1338 if (compiler->gpu_id < 600)
1339 return false;
1340
1341 /* NOTE in the special try_swap_mad_two_srcs() case we can be
1342 * called on a src that has already had an indirect load folded
1343 * in, in which case ssa() returns NULL
1344 */
1345 if (instr->regs[n+1]->flags & IR3_REG_SSA) {
1346 struct ir3_instruction *src = ssa(instr->regs[n+1]);
1347 if (src->address->block != instr->block)
1348 return false;
1349 }
1350 }
1351
1352 switch (opc_cat(instr->opc)) {
1353 case 1:
1354 valid_flags = IR3_REG_IMMED | IR3_REG_CONST | IR3_REG_RELATIV;
1355 if (flags & ~valid_flags)
1356 return false;
1357 break;
1358 case 2:
1359 valid_flags = ir3_cat2_absneg(instr->opc) |
1360 IR3_REG_CONST | IR3_REG_RELATIV;
1361
1362 if (ir3_cat2_int(instr->opc))
1363 valid_flags |= IR3_REG_IMMED;
1364
1365 if (flags & ~valid_flags)
1366 return false;
1367
1368 if (flags & (IR3_REG_CONST | IR3_REG_IMMED)) {
1369 unsigned m = (n ^ 1) + 1;
1370 /* cannot deal w/ const in both srcs:
1371 * (note that some cat2 actually only have a single src)
1372 */
1373 if (m < instr->regs_count) {
1374 struct ir3_register *reg = instr->regs[m];
1375 if ((flags & IR3_REG_CONST) && (reg->flags & IR3_REG_CONST))
1376 return false;
1377 if ((flags & IR3_REG_IMMED) && (reg->flags & IR3_REG_IMMED))
1378 return false;
1379 }
1380 }
1381 break;
1382 case 3:
1383 valid_flags = ir3_cat3_absneg(instr->opc) |
1384 IR3_REG_CONST | IR3_REG_RELATIV;
1385
1386 if (flags & ~valid_flags)
1387 return false;
1388
1389 if (flags & (IR3_REG_CONST | IR3_REG_RELATIV)) {
1390 /* cannot deal w/ const/relativ in 2nd src: */
1391 if (n == 1)
1392 return false;
1393 }
1394
1395 break;
1396 case 4:
1397 /* seems like blob compiler avoids const as src.. */
1398 /* TODO double check if this is still the case on a4xx */
1399 if (flags & (IR3_REG_CONST | IR3_REG_IMMED))
1400 return false;
1401 if (flags & (IR3_REG_SABS | IR3_REG_SNEG))
1402 return false;
1403 break;
1404 case 5:
1405 /* no flags allowed */
1406 if (flags)
1407 return false;
1408 break;
1409 case 6:
1410 valid_flags = IR3_REG_IMMED;
1411 if (flags & ~valid_flags)
1412 return false;
1413
1414 if (flags & IR3_REG_IMMED) {
1415 /* doesn't seem like we can have immediate src for store
1416 * instructions:
1417 *
1418 * TODO this restriction could also apply to load instructions,
1419 * but for load instructions this arg is the address (and not
1420 * really sure any good way to test a hard-coded immed addr src)
1421 */
1422 if (is_store(instr) && (n == 1))
1423 return false;
1424
1425 if ((instr->opc == OPC_LDL) && (n == 0))
1426 return false;
1427
1428 if ((instr->opc == OPC_STL) && (n != 2))
1429 return false;
1430
1431 if (instr->opc == OPC_STLW && n == 0)
1432 return false;
1433
1434 if (instr->opc == OPC_LDLW && n == 0)
1435 return false;
1436
1437 /* disallow immediates in anything but the SSBO slot argument for
1438 * cat6 instructions:
1439 */
1440 if (is_atomic(instr->opc) && (n != 0))
1441 return false;
1442
1443 if (is_atomic(instr->opc) && !(instr->flags & IR3_INSTR_G))
1444 return false;
1445
1446 if (instr->opc == OPC_STG && (instr->flags & IR3_INSTR_G) && (n != 2))
1447 return false;
1448
1449 /* as with atomics, these cat6 instrs can only have an immediate
1450 * for SSBO/IBO slot argument
1451 */
1452 switch (instr->opc) {
1453 case OPC_LDIB:
1454 case OPC_LDC:
1455 case OPC_RESINFO:
1456 if (n != 0)
1457 return false;
1458 break;
1459 default:
1460 break;
1461 }
1462 }
1463
1464 break;
1465 }
1466
1467 return true;
1468 }
1469