/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file
 *
 * This file supports generating code from the FS LIR to the actual
 * native instructions.
 */

#include "brw_eu.h"
#include "brw_disasm_info.h"
#include "brw_fs.h"
#include "brw_generator.h"
#include "brw_cfg.h"
#include "dev/intel_debug.h"
#include "util/mesa-sha1.h"
#include "util/half_float.h"

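/* Map an IR math opcode onto the hardware's extended math function encoding
 * used by the MATH instruction.
 */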
static uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}

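/* Convert an IR register into the fixed hardware register form expected by
 * the EU assembler.  By this point all logical files (VGRF, ATTR, UNIFORM)
 * must already have been lowered; BAD_FILE becomes the null register.
 */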
static struct brw_reg
normalize_brw_reg_for_encoding(brw_reg *reg)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case ADDRESS:
   case ARF:
   case FIXED_GRF:
   case IMM:
      assert(reg->offset == 0);
      brw_reg = *reg;
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case VGRF:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }

   return brw_reg;
}

brw_generator::brw_generator(const struct brw_compiler *compiler,
                             const struct brw_compile_params *params,
                             struct brw_stage_prog_data *prog_data,
                             gl_shader_stage stage)

   : compiler(compiler), params(params),
     devinfo(compiler->devinfo),
     prog_data(prog_data), dispatch_width(0),
     debug_flag(false),
     shader_name(NULL), stage(stage), mem_ctx(params->mem_ctx)
{
   p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(&compiler->isa, p, mem_ctx);
}

brw_generator::~brw_generator()
{
}

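/* Bookkeeping record for the instruction-store index of a HALT that still
 * needs its jump targets patched once the end of the program is known.
 */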
class ip_record : public exec_node {
public:
   DECLARE_RALLOC_CXX_OPERATORS(ip_record)

   ip_record(int ip)
   {
      this->ip = ip;
   }

   int ip;
};

bool
brw_generator::patch_halt_jumps()
{
   if (this->discard_halt_patches.is_empty())
      return false;

   int scale = brw_jump_scale(p->devinfo);

   /* There is a somewhat strange undocumented requirement of using
    * HALT, according to the simulator.  If some channel has HALTed to
    * a particular UIP, then by the end of the program, every channel
    * must have HALTed to that UIP.  Furthermore, the tracking is a
    * stack, so you can't do the final halt of a UIP after starting
    * halting to a new UIP.
    *
    * Symptoms of not emitting this instruction on actual hardware
    * included GPU hangs and sparkly rendering on the piglit discard
    * tests.
    */
   brw_eu_inst *last_halt = brw_HALT(p);
   brw_eu_inst_set_uip(p->devinfo, last_halt, 1 * scale);
   brw_eu_inst_set_jip(p->devinfo, last_halt, 1 * scale);

   int ip = p->nr_insn;

   foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
      brw_eu_inst *patch = &p->store[patch_ip->ip];

      assert(brw_eu_inst_opcode(p->isa, patch) == BRW_OPCODE_HALT);
      /* HALT takes a half-instruction distance from the pre-incremented IP. */
      brw_eu_inst_set_uip(p->devinfo, patch, (ip - patch_ip->ip) * scale);
   }

   this->discard_halt_patches.make_empty();

   return true;
}

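/* Emit the native SEND for a logical SEND instruction.  A plain indirect
 * SEND is used when there is no extended descriptor; otherwise a split-send
 * message is emitted, and check_tdr switches the generated opcode to its
 * conditional (SENDC/SENDSC) form.
 */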
void
brw_generator::generate_send(fs_inst *inst,
                             struct brw_reg dst,
                             struct brw_reg desc,
                             struct brw_reg ex_desc,
                             struct brw_reg payload,
                             struct brw_reg payload2)
{
   if (ex_desc.file == IMM && ex_desc.ud == 0) {
      brw_send_indirect_message(p, inst->sfid, dst, payload, desc, inst->eot);
      if (inst->check_tdr)
         brw_eu_inst_set_opcode(p->isa, brw_last_inst, BRW_OPCODE_SENDC);
   } else {
      /* If we have any sort of extended descriptor, then we need SENDS.  This
       * also covers the dual-payload case because ex_mlen goes in ex_desc.
       */
      brw_send_indirect_split_message(p, inst->sfid, dst, payload, payload2,
                                      desc, ex_desc, inst->ex_mlen,
                                      inst->send_ex_bso, inst->eot);
      if (inst->check_tdr)
         brw_eu_inst_set_opcode(p->isa, brw_last_inst,
                                devinfo->ver >= 12 ? BRW_OPCODE_SENDC : BRW_OPCODE_SENDSC);
   }
}

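/* Emit a MOV whose source GRF is selected at run time by a per-channel byte
 * offset.  A constant offset is simply folded into the register number,
 * while a variable offset goes through VxH indirect addressing via a0.
 */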
void
brw_generator::generate_mov_indirect(fs_inst *inst,
                                     struct brw_reg dst,
                                     struct brw_reg reg,
                                     struct brw_reg indirect_byte_offset)
{
   assert(indirect_byte_offset.type == BRW_TYPE_UD);
   assert(indirect_byte_offset.file == FIXED_GRF);
   assert(!reg.abs && !reg.negate);

   /* Gen12.5 adds the following region restriction:
    *
    *    "Vx1 and VxH indirect addressing for Float, Half-Float, Double-Float
    *     and Quad-Word data must not be used."
    *
    * We require the source and destination types to match so stomp to an
    * unsigned integer type.
    */
   assert(reg.type == dst.type);
   reg.type = dst.type =
      brw_type_with_size(BRW_TYPE_UD, brw_type_size_bits(reg.type));

   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;

   if (indirect_byte_offset.file == IMM) {
      imm_byte_offset += indirect_byte_offset.ud;

      reg.nr = imm_byte_offset / REG_SIZE;
      reg.subnr = imm_byte_offset % REG_SIZE;
      if (brw_type_size_bytes(reg.type) > 4 && !devinfo->has_64bit_int) {
         brw_MOV(p, subscript(dst, BRW_TYPE_D, 0),
                 subscript(reg, BRW_TYPE_D, 0));
         brw_set_default_swsb(p, tgl_swsb_null());
         brw_MOV(p, subscript(dst, BRW_TYPE_D, 1),
                 subscript(reg, BRW_TYPE_D, 1));
      } else {
         brw_MOV(p, dst, reg);
      }
   } else {
      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
      struct brw_reg addr = vec8(brw_address_reg(0));

      /* Whether we can use destination dependency control without running the
       * risk of a hang if an instruction gets shot down.
       */
      const bool use_dep_ctrl = !inst->predicate &&
                                inst->exec_size == dispatch_width;
      brw_eu_inst *insn;

      /* The destination stride of an instruction (in bytes) must be greater
       * than or equal to the size of the rest of the instruction.  Since the
       * address register is of type UW, we can't use a D-type instruction.
       * In order to get around this, we retype to UW and use a stride.
       */
      indirect_byte_offset =
         retype(spread(indirect_byte_offset, 2), BRW_TYPE_UW);

      /* There are a number of reasons why we don't use the base offset here.
       * One reason is that the field is only 9 bits which means we can only
       * use it to access the first 16 GRFs.  Also, from the Haswell PRM
       * section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Since the indirect may cause us to cross a register boundary, this
       * makes the base offset almost useless.  We could try and do something
       * clever where we use an actual base offset if base_offset % 32 == 0 but
       * that would mean we were generating different code depending on the
       * base offset.  Instead, for the sake of consistency, we'll just do the
       * add ourselves.  This restriction is only listed in the Haswell PRM
       * but empirical testing indicates that it applies on all older
       * generations and is lifted on Broadwell.
       *
       * In the end, while base_offset is nice to look at in the generated
       * code, using it saves us 0 instructions and would require quite a bit
       * of case-by-case work.  It's just not worth it.
       *
       * Due to a hardware bug some platforms (particularly Gfx11+) seem to
       * require the address components of all channels to be valid whether or
       * not they're active, which causes issues if we use VxH addressing
       * under non-uniform control-flow.  We can easily work around that by
       * initializing the whole address register with a pipelined NoMask MOV
       * instruction.
       */
      insn = brw_MOV(p, addr, brw_imm_uw(imm_byte_offset));
      brw_eu_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
      brw_eu_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
      if (devinfo->ver >= 12)
         brw_set_default_swsb(p, tgl_swsb_null());
      else
         brw_eu_inst_set_no_dd_clear(devinfo, insn, use_dep_ctrl);

      insn = brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
      if (devinfo->ver >= 12)
         brw_set_default_swsb(p, tgl_swsb_regdist(1));
      else
         brw_eu_inst_set_no_dd_check(devinfo, insn, use_dep_ctrl);

      if (brw_type_size_bytes(reg.type) > 4 &&
          (intel_device_info_is_9lp(devinfo) || !devinfo->has_64bit_int)) {
         /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
          *
          *    "When source or destination datatype is 64b or operation is
          *    integer DWord multiply, indirect addressing must not be used."
          *
          * We may also not support Q/UQ types.
          *
          * To work around both of these, we do two integer MOVs instead
          * of one 64-bit MOV.  Because no double value should ever cross
          * a register boundary, it's safe to use the immediate offset in
          * the indirect here to handle adding 4 bytes to the offset and
          * avoid the extra ADD to the register file.
          */
         brw_MOV(p, subscript(dst, BRW_TYPE_D, 0),
                 retype(brw_VxH_indirect(0, 0), BRW_TYPE_D));
         brw_set_default_swsb(p, tgl_swsb_null());
         brw_MOV(p, subscript(dst, BRW_TYPE_D, 1),
                 retype(brw_VxH_indirect(0, 4), BRW_TYPE_D));
      } else {
         struct brw_reg ind_src = brw_VxH_indirect(0, 0);

         brw_MOV(p, dst, retype(ind_src, reg.type));
      }
   }
}

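/* Shuffle the channels of src into dst according to a per-channel index,
 * using VxH indirect addressing through the address register.  Uniform
 * sources and immediate indices degenerate into a simple broadcast MOV.
 */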
void
brw_generator::generate_shuffle(fs_inst *inst,
                                struct brw_reg dst,
                                struct brw_reg src,
                                struct brw_reg idx)
{
   assert(src.file == FIXED_GRF);
   assert(!src.abs && !src.negate);

   /* Ivy bridge has some strange behavior that makes this a real pain to
    * implement for 64-bit values so we just don't bother.
    */
   assert(devinfo->has_64bit_float || brw_type_size_bytes(src.type) <= 4);

   /* Gen12.5 adds the following region restriction:
    *
    *    "Vx1 and VxH indirect addressing for Float, Half-Float, Double-Float
    *     and Quad-Word data must not be used."
    *
    * We require the source and destination types to match so stomp to an
    * unsigned integer type.
    */
   assert(src.type == dst.type);
   src.type = dst.type =
      brw_type_with_size(BRW_TYPE_UD, brw_type_size_bits(src.type));

   /* Because we're using the address register, we're limited to 16-wide
    * by the address register file and 8-wide for 64-bit types.  We could try
    * and make this instruction splittable higher up in the compiler but that
    * gets weird because it reads all of the channels regardless of execution
    * size.  It's easier just to split it here.
    */
   unsigned lower_width = MIN2(16, inst->exec_size);
   if (devinfo->ver < 20 && (element_sz(src) > 4 || element_sz(dst) > 4)) {
      lower_width = 8;
   }

   brw_set_default_exec_size(p, cvt(lower_width) - 1);
   for (unsigned group = 0; group < inst->exec_size; group += lower_width) {
      brw_set_default_group(p, group);

      if ((src.vstride == 0 && src.hstride == 0) ||
          idx.file == IMM) {
         /* Trivial, the source is already uniform or the index is a constant.
          * We will typically not get here if the optimizer is doing its job,
          * but asserting would be mean.
          */
         const unsigned i = idx.file == IMM ? idx.ud : 0;
         struct brw_reg group_src = stride(suboffset(src, i), 0, 1, 0);
         struct brw_reg group_dst = suboffset(dst, group << (dst.hstride - 1));
         brw_MOV(p, group_dst, group_src);
      } else {
         /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
         struct brw_reg addr = vec8(brw_address_reg(0));

         struct brw_reg group_idx = suboffset(idx, group);

         if (lower_width == 8 && group_idx.width == BRW_WIDTH_16) {
            /* Things get grumpy if the register is too wide. */
            group_idx.width--;
            group_idx.vstride--;
         }

         assert(brw_type_size_bytes(group_idx.type) <= 4);
         if (brw_type_size_bytes(group_idx.type) == 4) {
            /* The destination stride of an instruction (in bytes) must be
             * greater than or equal to the size of the rest of the
             * instruction.  Since the address register is of type UW, we
             * can't use a D-type instruction.  In order to get around this,
             * we retype to a word type and use a stride.
             */
            group_idx = retype(spread(group_idx, 2), BRW_TYPE_W);
         }

         uint32_t src_start_offset = src.nr * REG_SIZE + src.subnr;

         /* From the Haswell PRM:
          *
          *    "When a sequence of NoDDChk and NoDDClr are used, the last
          *    instruction that completes the scoreboard clear must have a
          *    non-zero execution mask.  This means, if any kind of predication
          *    can change the execution mask or channel enable of the last
          *    instruction, the optimization must be avoided.  This is to
          *    avoid instructions being shot down the pipeline when no writes
          *    are required."
          *
          * Whenever predication is enabled or the instructions being emitted
          * aren't the full width, it's possible that it will be run with zero
          * channels enabled so we can't use dependency control without
          * running the risk of a hang if an instruction gets shot down.
          */
         const bool use_dep_ctrl = !inst->predicate &&
                                   lower_width == dispatch_width;
         brw_eu_inst *insn;

         /* Due to a hardware bug some platforms (particularly Gfx11+) seem
          * to require the address components of all channels to be valid
          * whether or not they're active, which causes issues if we use VxH
          * addressing under non-uniform control-flow.  We can easily work
          * around that by initializing the whole address register with a
          * pipelined NoMask MOV instruction.
          */
         insn = brw_MOV(p, addr, brw_imm_uw(src_start_offset));
         brw_eu_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
         brw_eu_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
         if (devinfo->ver >= 12)
            brw_set_default_swsb(p, tgl_swsb_null());
         else
            brw_eu_inst_set_no_dd_clear(devinfo, insn, use_dep_ctrl);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         insn = brw_SHL(p, addr, group_idx,
                        brw_imm_uw(util_logbase2(brw_type_size_bytes(src.type)) +
                                   src.hstride - 1));
         if (devinfo->ver >= 12)
            brw_set_default_swsb(p, tgl_swsb_regdist(1));
         else
            brw_eu_inst_set_no_dd_check(devinfo, insn, use_dep_ctrl);

         /* Add on the register start offset */
         brw_ADD(p, addr, addr, brw_imm_uw(src_start_offset));
         brw_MOV(p, suboffset(dst, group << (dst.hstride - 1)),
                 retype(brw_VxH_indirect(0, 0), src.type));
      }

      brw_set_default_swsb(p, tgl_swsb_null());
   }
}

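/* Apply a 4-channel swizzle within each quad of the source.  Uniform sources
 * reduce to a single MOV, pre-Gfx11 32-bit values use an Align16 swizzled
 * MOV, and everything else is expanded into strided Align1 MOVs.
 */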
void
brw_generator::generate_quad_swizzle(const fs_inst *inst,
                                     struct brw_reg dst, struct brw_reg src,
                                     unsigned swiz)
{
   /* Requires a quad. */
   assert(inst->exec_size >= 4);

   if (src.file == IMM ||
       has_scalar_region(src)) {
      /* The value is uniform across all channels */
      brw_MOV(p, dst, src);

   } else if (devinfo->ver < 11 && brw_type_size_bytes(src.type) == 4) {
      /* This only works on 8-wide 32-bit values */
      assert(inst->exec_size == 8);
      assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src.vstride == src.width + 1);
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      struct brw_reg swiz_src = stride(src, 4, 4, 1);
      swiz_src.swizzle = swiz;
      brw_MOV(p, dst, swiz_src);

   } else {
      assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src.vstride == src.width + 1);
      const struct brw_reg src_0 = suboffset(src, BRW_GET_SWZ(swiz, 0));

      switch (swiz) {
      case BRW_SWIZZLE_XXXX:
      case BRW_SWIZZLE_YYYY:
      case BRW_SWIZZLE_ZZZZ:
      case BRW_SWIZZLE_WWWW:
         brw_MOV(p, dst, stride(src_0, 4, 4, 0));
         break;

      case BRW_SWIZZLE_XXZZ:
      case BRW_SWIZZLE_YYWW:
         brw_MOV(p, dst, stride(src_0, 2, 2, 0));
         break;

      case BRW_SWIZZLE_XYXY:
      case BRW_SWIZZLE_ZWZW:
         assert(inst->exec_size == 4);
         brw_MOV(p, dst, stride(src_0, 0, 2, 1));
         break;

      default:
         assert(inst->force_writemask_all);
         brw_set_default_exec_size(p, cvt(inst->exec_size / 4) - 1);

         for (unsigned c = 0; c < 4; c++) {
            brw_eu_inst *insn = brw_MOV(
               p, stride(suboffset(dst, c),
                         4 * inst->dst.stride, 1, 4 * inst->dst.stride),
               stride(suboffset(src, BRW_GET_SWZ(swiz, c)), 4, 1, 0));

            if (devinfo->ver < 12) {
               brw_eu_inst_set_no_dd_clear(devinfo, insn, c < 3);
               brw_eu_inst_set_no_dd_check(devinfo, insn, c > 0);
            }

            brw_set_default_swsb(p, tgl_swsb_null());
         }

         break;
      }
   }
}

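/* Emit the barrier message and then wait for the barrier notification:
 * SYNC.BAR on Gfx12+, the legacy WAIT instruction on older hardware.
 */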
void
brw_generator::generate_barrier(fs_inst *, struct brw_reg src)
{
   brw_barrier(p, src);
   if (devinfo->ver >= 12) {
      brw_set_default_swsb(p, tgl_swsb_null());
      brw_SYNC(p, TGL_SYNC_BAR);
   } else {
      brw_WAIT(p);
   }
}

/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * Ideally, we want to produce:
 *
 *             DDX                    DDY
 * dst: (ss0.tr - ss0.tl)     (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)     (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)     (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)     (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)     (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)     (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)     (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)     (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But the ideal approximation may impose a huge performance cost on
 * sample_d.  On at least Haswell, the sample_d instruction does some
 * optimizations if the same LOD is used for all pixels in the subspan.
 *
 * For DDY, we need to use ALIGN16 mode since it's capable of doing the
 * appropriate swizzling.
 */
void
brw_generator::generate_ddx(const fs_inst *inst,
                            struct brw_reg dst, struct brw_reg src)
{
   unsigned vstride, width;

   if (inst->opcode == FS_OPCODE_DDX_FINE) {
      /* produce accurate derivatives */
      vstride = BRW_VERTICAL_STRIDE_2;
      width = BRW_WIDTH_2;
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      vstride = BRW_VERTICAL_STRIDE_4;
      width = BRW_WIDTH_4;
   }

   struct brw_reg src0 = byte_offset(src, brw_type_size_bytes(src.type));
   struct brw_reg src1 = src;

   src0.vstride = vstride;
   src0.width = width;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = vstride;
   src1.width = width;
   src1.hstride = BRW_HORIZONTAL_STRIDE_0;

   brw_ADD(p, dst, src0, negate(src1));
}

/* The negate_value boolean is used to negate the derivative computation for
 * FBOs, since they place the origin at the upper left instead of the lower
 * left.
 */
void
brw_generator::generate_ddy(const fs_inst *inst,
                            struct brw_reg dst, struct brw_reg src)
{
   const uint32_t type_size = brw_type_size_bytes(src.type);

   if (inst->opcode == FS_OPCODE_DDY_FINE) {
      /* produce accurate derivatives.
       *
       * From the Broadwell PRM, Volume 7 (3D-Media-GPGPU)
       * "Register Region Restrictions", Section "1. Special Restrictions":
       *
       *    "In Align16 mode, the channel selects and channel enables apply to
       *     a pair of half-floats, because these parameters are defined for
       *     DWord elements ONLY. This is applicable when both source and
       *     destination are half-floats."
       *
       * So for half-float operations we use the Gfx11+ Align1 path. CHV
       * inherits its FP16 hardware from SKL, so it is not affected.
       */
      if (devinfo->ver >= 11) {
         src = stride(src, 0, 2, 1);

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         for (uint32_t g = 0; g < inst->exec_size; g += 4) {
            brw_set_default_group(p, inst->group + g);
            brw_ADD(p, byte_offset(dst, g * type_size),
                    negate(byte_offset(src, g * type_size)),
                    byte_offset(src, (g + 2) * type_size));
            brw_set_default_swsb(p, tgl_swsb_null());
         }
         brw_pop_insn_state(p);
      } else {
         struct brw_reg src0 = stride(src, 4, 4, 1);
         struct brw_reg src1 = stride(src, 4, 4, 1);
         src0.swizzle = BRW_SWIZZLE_XYXY;
         src1.swizzle = BRW_SWIZZLE_ZWZW;

         brw_push_insn_state(p);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, dst, negate(src0), src1);
         brw_pop_insn_state(p);
      }
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      struct brw_reg src0 = byte_offset(stride(src, 4, 4, 0), 0 * type_size);
      struct brw_reg src1 = byte_offset(stride(src, 4, 4, 0), 2 * type_size);

      brw_ADD(p, dst, negate(src0), src1);
   }
}

void
brw_generator::generate_halt(fs_inst *)
{
   /* This HALT will be patched up at FB write time to point UIP at the end of
    * the program, and at brw_uip_jip() JIP will be set to the end of the
    * current block (or the program).
    */
   this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
   brw_HALT(p);
}

/* The A32 messages take a buffer base address in header.5:[31:0] (See
 * MH1_A32_PSM for typed messages or MH_A32_GO for byte/dword scattered
 * and OWord block messages in the SKL PRM Vol. 2d for more details.)
 * Unfortunately, there are a number of subtle differences:
 *
 * For the block read/write messages:
 *
 *   - We always stomp header.2 to fill in the actual scratch address (in
 *     units of OWORDs) so we don't care what's in there.
 *
 *   - They rely on per-thread scratch space value in header.3[3:0] to do
 *     bounds checking so that needs to be valid.  The upper bits of
 *     header.3 are ignored, though, so we can copy all of g0.3.
 *
 *   - They ignore header.5[9:0] and assume the address is 1KB aligned.
 *
 *
 * For the byte/dword scattered read/write messages:
 *
 *   - We want header.2 to be zero because that gets added to the per-channel
 *     offset in the non-header portion of the message.
 *
 *   - Contrary to what the docs claim, they don't do any bounds checking so
 *     the value of header.3[3:0] doesn't matter.
 *
 *   - They consider all of header.5 for the base address and header.5[9:0]
 *     are not ignored.  This means that we can't copy g0.5 verbatim because
 *     g0.5[9:0] contains the FFTID on most platforms.  Instead, we have to
 *     use an AND to mask off the bottom 10 bits.
 *
 *
 * For block messages, just copying g0 gives a valid header because all the
 * garbage gets ignored except for header.2 which we stomp as part of message
 * setup.  For byte/dword scattered messages, we can just zero out the header
 * and copy over the bits we need from g0.5.  This opcode, however, tries to
 * satisfy the requirements of both by starting with 0 and filling out the
 * information required by either set of opcodes.
 */
void
brw_generator::generate_scratch_header(fs_inst *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src)
{
   assert(inst->exec_size == 8 && inst->force_writemask_all);
   assert(dst.file == FIXED_GRF);
   assert(src.file == FIXED_GRF);
   assert(src.type == BRW_TYPE_UD);

   dst.type = BRW_TYPE_UD;

   brw_eu_inst *insn = brw_MOV(p, dst, brw_imm_ud(0));
   if (devinfo->ver >= 12)
      brw_set_default_swsb(p, tgl_swsb_null());
   else
      brw_eu_inst_set_no_dd_clear(p->devinfo, insn, true);

   /* Copy the per-thread scratch space size from g0.3[3:0] */
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   insn = brw_AND(p, suboffset(dst, 3), component(src, 3),
                  brw_imm_ud(INTEL_MASK(3, 0)));
   if (devinfo->ver < 12) {
      brw_eu_inst_set_no_dd_clear(p->devinfo, insn, true);
      brw_eu_inst_set_no_dd_check(p->devinfo, insn, true);
   }

   /* Copy the scratch base address from g0.5[31:10] */
   insn = brw_AND(p, suboffset(dst, 5), component(src, 5),
                  brw_imm_ud(INTEL_MASK(31, 10)));
   if (devinfo->ver < 12)
      brw_eu_inst_set_no_dd_check(p->devinfo, insn, true);
}

void
brw_generator::enable_debug(const char *shader_name)
{
   debug_flag = true;
   this->shader_name = shader_name;
}

static gfx12_systolic_depth
translate_systolic_depth(unsigned d)
{
   /* Could also return (ffs(d) - 1) & 3. */
   switch (d) {
   case 2:  return BRW_SYSTOLIC_DEPTH_2;
   case 4:  return BRW_SYSTOLIC_DEPTH_4;
   case 8:  return BRW_SYSTOLIC_DEPTH_8;
   case 16: return BRW_SYSTOLIC_DEPTH_16;
   default: unreachable("Invalid systolic depth.");
   }
}

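/* Main code-generation entry point: walk the CFG and translate each IR
 * instruction into native EU instructions, applying per-platform workarounds
 * along the way, then validate, compact, and report statistics.  Returns the
 * byte offset of the generated code within the program store.
 */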
int
brw_generator::generate_code(const cfg_t *cfg, int dispatch_width,
                             struct brw_shader_stats shader_stats,
                             const brw::performance &perf,
                             struct brw_compile_stats *stats,
                             unsigned max_polygons)
{
   /* align to 64 byte boundary. */
   brw_realign(p, 64);

   this->dispatch_width = dispatch_width;

   int start_offset = p->next_insn_offset;

   int loop_count = 0, send_count = 0, nop_count = 0, sync_nop_count = 0;
   bool is_accum_used = false;

   struct disasm_info *disasm_info = disasm_initialize(p->isa, cfg);

   enum opcode prev_opcode = BRW_OPCODE_ILLEGAL;
   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      if (inst->opcode == SHADER_OPCODE_UNDEF)
         continue;

      struct brw_reg src[4], dst;
      unsigned int last_insn_offset = p->next_insn_offset;
      bool multiple_instructions_emitted = false;
      tgl_swsb swsb = inst->sched;

      /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
       * "Register Region Restrictions" section: for BDW, SKL:
       *
       *    "A POW/FDIV operation must not be followed by an instruction
       *     that requires two destination registers."
       *
       * The documentation is often lacking annotations for Atom parts,
       * and empirically this affects CHV as well.
       */
      if (devinfo->ver <= 9 &&
          p->nr_insn > 1 &&
          brw_eu_inst_opcode(p->isa, brw_last_inst) == BRW_OPCODE_MATH &&
          brw_eu_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
          inst->dst.component_size(inst->exec_size) > REG_SIZE) {
         brw_NOP(p);
         last_insn_offset = p->next_insn_offset;

         /* In order to avoid spurious instruction count differences when the
          * instruction schedule changes, keep track of the number of inserted
          * NOPs.
          */
         nop_count++;
      }

      /* Wa_14010017096:
       *
       * Clear accumulator register before end of thread.
       */
      if (inst->eot && is_accum_used &&
          intel_needs_workaround(devinfo, 14010017096)) {
         brw_set_default_exec_size(p, BRW_EXECUTE_16);
         brw_set_default_group(p, 0);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         brw_set_default_flag_reg(p, 0, 0);
         brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
         brw_MOV(p, brw_acc_reg(8), brw_imm_f(0.0f));
         last_insn_offset = p->next_insn_offset;
         swsb = tgl_swsb_dst_dep(swsb, 1);
      }

      if (!is_accum_used && !inst->eot) {
         is_accum_used = inst->writes_accumulator_implicitly(devinfo) ||
                         inst->dst.is_accumulator();
      }

      /* Wa_14013672992:
       *
       * Always use @1 SWSB for EOT.
       */
      if (inst->eot && intel_needs_workaround(devinfo, 14013672992)) {
         if (tgl_swsb_src_dep(swsb).mode) {
            brw_set_default_exec_size(p, BRW_EXECUTE_1);
            brw_set_default_mask_control(p, BRW_MASK_DISABLE);
            brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
            brw_set_default_flag_reg(p, 0, 0);
            brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
            brw_SYNC(p, TGL_SYNC_NOP);
            last_insn_offset = p->next_insn_offset;
         }

         swsb = tgl_swsb_dst_dep(swsb, 1);
      }

      if (unlikely(debug_flag))
         disasm_annotate(disasm_info, inst, p->next_insn_offset);

      if (devinfo->ver >= 20 && inst->group % 8 != 0) {
         assert(inst->force_writemask_all);
         assert(!inst->predicate && !inst->conditional_mod);
         assert(!inst->writes_accumulator_implicitly(devinfo) &&
                !inst->reads_accumulator_implicitly());
         assert(inst->opcode != SHADER_OPCODE_SEL_EXEC);
         brw_set_default_group(p, 0);
      } else {
         brw_set_default_group(p, inst->group);
      }

      for (unsigned int i = 0; i < inst->sources; i++) {
         src[i] = normalize_brw_reg_for_encoding(&inst->src[i]);
         /* The accumulator result appears to get used for the
          * conditional modifier generation.  When negating a UD
          * value, there is a 33rd bit generated for the sign in the
          * accumulator value, so now you can't check, for example,
          * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
          */
         assert(!inst->conditional_mod ||
                inst->src[i].type != BRW_TYPE_UD ||
                !inst->src[i].negate);
      }
      dst = normalize_brw_reg_for_encoding(&inst->dst);

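      /* Program the default instruction state (access mode, predication,
       * flag register, saturate, write masking, accumulator write control,
       * SWSB and execution size) that the EU assembler will stamp onto the
       * instructions emitted for this IR instruction below.
       */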
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      /* On gfx7 and above, hardware automatically adds the group onto the
       * flag subregister number.
       */
      const unsigned flag_subreg = inst->flag_subreg;
      brw_set_default_flag_reg(p, flag_subreg / 2, flag_subreg % 2);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      if (devinfo->ver >= 20 && inst->writes_accumulator) {
         assert(inst->dst.is_accumulator() ||
                inst->opcode == BRW_OPCODE_ADDC ||
                inst->opcode == BRW_OPCODE_MACH ||
                inst->opcode == BRW_OPCODE_SUBB);
      } else {
         brw_set_default_acc_write_control(p, inst->writes_accumulator);
      }
      brw_set_default_swsb(p, swsb);

      unsigned exec_size = inst->exec_size;

      brw_set_default_exec_size(p, cvt(exec_size) - 1);

      assert(inst->force_writemask_all || inst->exec_size >= 4);
      assert(inst->force_writemask_all || inst->group % inst->exec_size == 0);
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH * reg_unit(devinfo));

      switch (inst->opcode) {
      case BRW_OPCODE_NOP:
         brw_NOP(p);
         break;
      case BRW_OPCODE_SYNC:
         assert(src[0].file == IMM);
         brw_SYNC(p, tgl_sync_function(src[0].ud));

         if (tgl_sync_function(src[0].ud) == TGL_SYNC_NOP)
            ++sync_nop_count;

         break;
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_AVG:
         brw_AVG(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4A:
         assert(devinfo->ver >= 12);
         brw_DP4A(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_LINE:
         brw_LINE(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DPAS:
         assert(devinfo->verx10 >= 125);
         brw_DPAS(p, translate_systolic_depth(inst->sdepth), inst->rcount,
                  dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_MAD:
         if (devinfo->ver < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->ver <= 10);
         if (devinfo->ver < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_ADD3:
         assert(devinfo->verx10 >= 125);
         brw_ADD3(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_ROL:
         assert(devinfo->ver >= 11);
         assert(src[0].type == dst.type);
         brw_ROL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_ROR:
         assert(devinfo->ver >= 11);
         assert(src[0].type == dst.type);
         brw_ROR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_CMPN:
         brw_CMPN(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_CSEL:
         if (devinfo->ver < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_CSEL(p, dst, src[0], src[1], src[2]);
         break;
      case BRW_OPCODE_BFREV:
         brw_BFREV(p, retype(dst, BRW_TYPE_UD), retype(src[0], BRW_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         brw_FBH(p, retype(dst, src[0].type), src[0]);
         break;
      case BRW_OPCODE_FBL:
         brw_FBL(p, retype(dst, BRW_TYPE_UD), retype(src[0], BRW_TYPE_UD));
         break;
      case BRW_OPCODE_LZD:
         brw_LZD(p, dst, src[0]);
         break;
      case BRW_OPCODE_CBIT:
         brw_CBIT(p, retype(dst, BRW_TYPE_UD), retype(src[0], BRW_TYPE_UD));
         break;
      case BRW_OPCODE_ADDC:
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         if (devinfo->ver < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         if (devinfo->ver < 10)
            brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         brw_IF(p, brw_get_default_exec_size(p));
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, brw_get_default_exec_size(p));
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         break;

      case BRW_OPCODE_WHILE:
         /* On LNL and newer, if we don't put a NOP in between two consecutive
          * WHILE instructions we may end up with misrendering or GPU hangs.
          * See HSD 22020521218.
          */
         if (devinfo->ver >= 20 && unlikely(prev_opcode == BRW_OPCODE_WHILE))
            brw_NOP(p);

         brw_WHILE(p);
         loop_count++;
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         assert(inst->mlen == 0);
         gfx6_math(p, dst, brw_math_function(inst->opcode),
                   src[0], retype(brw_null_reg(), src[0].type));
         break;
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
      case SHADER_OPCODE_POW:
         assert(devinfo->verx10 < 125);
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         assert(inst->mlen == 0);
         assert(inst->opcode == SHADER_OPCODE_POW || inst->exec_size == 8);
         gfx6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         break;
      case BRW_OPCODE_PLN:
         /* PLN reads:
          *            /   in SIMD16   \
          *    -----------------------------------
          *   | src1+0 | src1+1 | src1+2 | src1+3 |
          *   |-----------------------------------|
          *   |(x0, x1)|(y0, y1)|(x2, x3)|(y2, y3)|
          *    -----------------------------------
          */
         brw_PLN(p, dst, src[0], src[1]);
         break;
      case FS_OPCODE_PIXEL_X:
         assert(src[0].type == BRW_TYPE_UW);
         assert(src[1].type == BRW_TYPE_UW);
         src[0].subnr = 0 * brw_type_size_bytes(src[0].type);
         if (src[1].file == IMM) {
            assert(src[1].ud == 0);
            brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         } else {
            /* Coarse pixel case */
            brw_ADD(p, dst, stride(src[0], 8, 4, 1), src[1]);
         }
         break;
      case FS_OPCODE_PIXEL_Y:
         assert(src[0].type == BRW_TYPE_UW);
         assert(src[1].type == BRW_TYPE_UW);
         src[0].subnr = 4 * brw_type_size_bytes(src[0].type);
         if (src[1].file == IMM) {
            assert(src[1].ud == 0);
            brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         } else {
            /* Coarse pixel case */
            brw_ADD(p, dst, stride(src[0], 8, 4, 1), src[1]);
         }
         break;

      case SHADER_OPCODE_SEND:
         generate_send(inst, dst, src[0], src[1], src[2],
                       inst->ex_mlen > 0 ? src[3] : brw_null_reg());
         send_count++;
         break;

      case FS_OPCODE_DDX_COARSE:
      case FS_OPCODE_DDX_FINE:
         generate_ddx(inst, dst, src[0]);
         break;
      case FS_OPCODE_DDY_COARSE:
      case FS_OPCODE_DDY_FINE:
         generate_ddy(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_SCRATCH_HEADER:
         generate_scratch_header(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_MOV_INDIRECT:
         generate_mov_indirect(inst, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_MOV_RELOC_IMM:
         assert(src[0].file == IMM);
         assert(src[1].file == IMM);
         brw_MOV_reloc_imm(p, dst, dst.type, src[0].ud, src[1].ud);
         break;

      case BRW_OPCODE_HALT:
         generate_halt(inst);
         break;

      case SHADER_OPCODE_INTERLOCK:
      case SHADER_OPCODE_MEMORY_FENCE: {
         assert(src[1].file == IMM);
         assert(src[2].file == IMM);

         const enum opcode send_op = inst->opcode == SHADER_OPCODE_INTERLOCK ?
            BRW_OPCODE_SENDC : BRW_OPCODE_SEND;

         brw_memory_fence(p, dst, src[0], send_op,
                          brw_message_target(inst->sfid),
                          inst->desc,
                          /* commit_enable */ src[1].ud,
                          /* bti */ src[2].ud);
         send_count++;
         break;
      }

      case FS_OPCODE_SCHEDULING_FENCE:
         if (inst->sources == 0 && swsb.regdist == 0 &&
             swsb.mode == TGL_SBID_NULL) {
            if (unlikely(debug_flag))
               disasm_info->use_tail = true;
            break;
         }

         if (devinfo->ver >= 12) {
            /* Use the available SWSB information to stall.  A single SYNC is
             * sufficient since if there were multiple dependencies, the
             * scoreboard algorithm already injected other SYNCs before this
             * instruction.
             */
            brw_SYNC(p, TGL_SYNC_NOP);
         } else {
            for (unsigned i = 0; i < inst->sources; i++) {
               /* Emit a MOV to force a stall until the instruction producing
                * the registers finishes.
                */
               brw_MOV(p, retype(brw_null_reg(), BRW_TYPE_UW),
                       retype(src[i], BRW_TYPE_UW));
            }

            if (inst->sources > 1)
               multiple_instructions_emitted = true;
         }

         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
      case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
         unreachable("Should be lowered by lower_find_live_channel()");
         break;

      case FS_OPCODE_LOAD_LIVE_CHANNELS: {
         assert(inst->force_writemask_all && inst->group == 0);
         assert(inst->dst.file == BAD_FILE);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(brw_flag_subreg(inst->flag_subreg), BRW_TYPE_UD),
                 retype(brw_mask_reg(0), BRW_TYPE_UD));
         break;
      }
      case SHADER_OPCODE_BROADCAST:
         assert(inst->force_writemask_all);
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_SHUFFLE:
         generate_shuffle(inst, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_SEL_EXEC:
         assert(inst->force_writemask_all);
         assert(devinfo->has_64bit_float || brw_type_size_bytes(dst.type) <= 4);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, dst, src[1]);
         brw_set_default_mask_control(p, BRW_MASK_ENABLE);
         brw_set_default_swsb(p, tgl_swsb_null());
         brw_MOV(p, dst, src[0]);
         break;

      case SHADER_OPCODE_QUAD_SWIZZLE:
         assert(src[1].file == IMM);
         assert(src[1].type == BRW_TYPE_UD);
         generate_quad_swizzle(inst, dst, src[0], src[1].ud);
         break;

      case SHADER_OPCODE_CLUSTER_BROADCAST: {
         assert((!intel_device_info_is_9lp(devinfo) &&
                 devinfo->has_64bit_float) || brw_type_size_bytes(src[0].type) <= 4);
         assert(!src[0].negate && !src[0].abs);
         assert(src[1].file == IMM);
         assert(src[1].type == BRW_TYPE_UD);
         assert(src[2].file == IMM);
         assert(src[2].type == BRW_TYPE_UD);
         const unsigned component = src[1].ud;
         const unsigned cluster_size = src[2].ud;
         assert(inst->src[0].file != ARF);

         unsigned s;
         if (inst->src[0].file == FIXED_GRF) {
            s = inst->src[0].hstride ? 1 << (inst->src[0].hstride - 1) : 0;
         } else {
            s = inst->src[0].stride;
         }
         unsigned vstride = cluster_size * s;
         unsigned width = cluster_size;

         /* The maximum exec_size is 32, but the maximum width is only 16. */
         if (inst->exec_size == width) {
            vstride = 0;
            width = 1;
         }

         struct brw_reg strided = stride(suboffset(src[0], component * s),
                                         vstride, width, 0);
         brw_MOV(p, dst, strided);
         break;
      }

      case SHADER_OPCODE_HALT_TARGET:
         /* This is the place where the final HALT needs to be inserted if
          * we've emitted any discards.  If not, this will emit no code.
          */
         if (!patch_halt_jumps()) {
            if (unlikely(debug_flag)) {
               disasm_info->use_tail = true;
            }
         }
         break;

      case SHADER_OPCODE_BARRIER:
         generate_barrier(inst, src[0]);
         send_count++;
         break;

      case SHADER_OPCODE_RND_MODE: {
         assert(src[0].file == IMM);
         /*
          * Changes the floating point rounding mode updating the control
          * register field defined at cr0.0[5-6] bits.
          */
         enum brw_rnd_mode mode =
            (enum brw_rnd_mode) (src[0].d << BRW_CR0_RND_MODE_SHIFT);
         brw_float_controls_mode(p, mode, BRW_CR0_RND_MODE_MASK);
      }
      break;

      case SHADER_OPCODE_FLOAT_CONTROL_MODE:
         assert(src[0].file == IMM);
         assert(src[1].file == IMM);
         brw_float_controls_mode(p, src[0].d, src[1].d);
         break;

      case SHADER_OPCODE_READ_ARCH_REG:
         if (devinfo->ver >= 12) {
            /* There is a SWSB restriction that requires that any time sr0 is
             * accessed both the instruction doing the access and the next one
             * have SWSB set to RegDist(1).
             */
            if (brw_get_default_swsb(p).mode != TGL_SBID_NULL)
               brw_SYNC(p, TGL_SYNC_NOP);
            brw_set_default_swsb(p, tgl_swsb_regdist(1));
            brw_MOV(p, dst, src[0]);
            brw_set_default_swsb(p, tgl_swsb_regdist(1));
            brw_AND(p, dst, dst, brw_imm_ud(0xffffffff));
         } else {
            brw_MOV(p, dst, src[0]);
         }
         break;

      default:
         unreachable("Unsupported opcode");

      case SHADER_OPCODE_LOAD_PAYLOAD:
         unreachable("Should be lowered by lower_load_payload()");
      }
      prev_opcode = inst->opcode;

      if (multiple_instructions_emitted)
         continue;

      if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->next_insn_offset == last_insn_offset + 16 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_eu_inst *last = &p->store[last_insn_offset / 16];

         if (inst->conditional_mod)
            brw_eu_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         if (devinfo->ver < 12) {
            brw_eu_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
            brw_eu_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
         }
      }

      /* When enabled, insert sync NOP after every instruction and make sure
       * that current instruction depends on the previous instruction.
       */
      if (INTEL_DEBUG(DEBUG_SWSB_STALL) && devinfo->ver >= 12) {
         brw_set_default_swsb(p, tgl_swsb_regdist(1));
         brw_SYNC(p, TGL_SYNC_NOP);
      }
   }

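   /* Patch the JIP/UIP jump targets of the flow-control instructions now
    * that all instructions have been emitted.
    */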
   brw_set_uip_jip(p, start_offset);

   /* end of program sentinel */
   disasm_new_inst_group(disasm_info, p->next_insn_offset);

   /* `send_count` explicitly does not include spills or fills, as we'd
    * like to use it as a metric for intentional memory access or other
    * shared function use.  Otherwise, subtle changes to scheduling or
    * register allocation could cause it to fluctuate wildly - and that
    * effect is already counted in spill/fill counts.
    */
   send_count -= shader_stats.spill_count;
   send_count -= shader_stats.fill_count;

#ifndef NDEBUG
   bool validated =
#else
   if (unlikely(debug_flag))
#endif
      brw_validate_instructions(&compiler->isa, p->store,
                                start_offset,
                                p->next_insn_offset,
                                disasm_info);

   int before_size = p->next_insn_offset - start_offset;
   brw_compact_instructions(p, start_offset, disasm_info);
   int after_size = p->next_insn_offset - start_offset;

   bool dump_shader_bin = brw_should_dump_shader_bin();
   unsigned char sha1[21];
   char sha1buf[41];

   if (unlikely(debug_flag || dump_shader_bin)) {
      _mesa_sha1_compute(p->store + start_offset / sizeof(brw_eu_inst),
                         after_size, sha1);
      _mesa_sha1_format(sha1buf, sha1);
   }

   if (unlikely(dump_shader_bin))
      brw_dump_shader_bin(p->store, start_offset, p->next_insn_offset,
                          sha1buf);

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s (src_hash 0x%08x) (sha1 %s)\n"
              "SIMD%d shader: %d instructions. %d loops. %u cycles. "
              "%d:%d spills:fills, %u sends, "
              "scheduled with mode %s. "
              "Promoted %u constants. "
              "Non-SSA regs (after NIR): %u. "
              "Compacted %d to %d bytes (%.0f%%)\n",
              shader_name, params->source_hash, sha1buf,
              dispatch_width,
              before_size / 16 - nop_count - sync_nop_count,
              loop_count, perf.latency,
              shader_stats.spill_count,
              shader_stats.fill_count,
              send_count,
              shader_stats.scheduler_mode,
              shader_stats.promoted_constants,
              shader_stats.non_ssa_registers_after_nir,
              before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      /* overriding the shader makes disasm_info invalid */
      if (!brw_try_override_assembly(p, start_offset, sha1buf)) {
         dump_assembly(p->store, start_offset, p->next_insn_offset,
                       disasm_info, perf.block_latency);
      } else {
         fprintf(stderr, "Successfully overrode shader with sha1 %s\n\n", sha1buf);
      }
   }
   ralloc_free(disasm_info);
#ifndef NDEBUG
   if (!validated && !debug_flag) {
      fprintf(stderr,
              "Validation failed. Rerun with INTEL_DEBUG=shaders to get more information.\n");
   }
#endif
   assert(validated);

   brw_shader_debug_log(compiler, params->log_data,
                        "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
                        "%d:%d spills:fills, %u sends, "
                        "scheduled with mode %s, "
                        "Promoted %u constants, "
                        "compacted %d to %d bytes.\n",
                        _mesa_shader_stage_to_abbrev(stage),
                        dispatch_width,
                        before_size / 16 - nop_count - sync_nop_count,
                        loop_count, perf.latency,
                        shader_stats.spill_count,
                        shader_stats.fill_count,
                        send_count,
                        shader_stats.scheduler_mode,
                        shader_stats.promoted_constants,
                        before_size, after_size);
   if (stats) {
      stats->dispatch_width = dispatch_width;
      stats->max_polygons = max_polygons;
      stats->max_dispatch_width = dispatch_width;
      stats->instructions = before_size / 16 - nop_count - sync_nop_count;
      stats->sends = send_count;
      stats->loops = loop_count;
      stats->cycles = perf.latency;
      stats->spills = shader_stats.spill_count;
      stats->fills = shader_stats.fill_count;
      stats->max_live_registers = shader_stats.max_register_pressure;
      stats->non_ssa_registers_after_nir = shader_stats.non_ssa_registers_after_nir;
   }

   return start_offset;
}

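/* Append the shader's constant data blob after the program text and record
 * its size and offset in prog_data.
 */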
void
brw_generator::add_const_data(void *data, unsigned size)
{
   assert(prog_data->const_data_size == 0);
   if (size > 0) {
      prog_data->const_data_size = size;
      prog_data->const_data_offset = brw_append_data(p, data, size, 32);
   }
}

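/* Append the resume shader binding table for bindless shader stages and
 * register a relocation for each entry so it can be resolved against the
 * final shader start offset later.
 */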
void
brw_generator::add_resume_sbt(unsigned num_resume_shaders, uint64_t *sbt)
{
   assert(brw_shader_stage_is_bindless(stage));
   struct brw_bs_prog_data *bs_prog_data = brw_bs_prog_data(prog_data);
   if (num_resume_shaders > 0) {
      bs_prog_data->resume_sbt_offset =
         brw_append_data(p, sbt, num_resume_shaders * sizeof(uint64_t), 32);
      for (unsigned i = 0; i < num_resume_shaders; i++) {
         size_t offset = bs_prog_data->resume_sbt_offset + i * sizeof(*sbt);
         assert(offset <= UINT32_MAX);
         brw_add_reloc(p, BRW_SHADER_RELOC_SHADER_START_OFFSET,
                       BRW_SHADER_RELOC_TYPE_U32,
                       (uint32_t)offset, (uint32_t)sbt[i]);
      }
   }
}

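/* Finalize and return the generated machine code, filling in the relocation
 * list and program size in prog_data.
 */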
const unsigned *
brw_generator::get_assembly()
{
   prog_data->relocs = brw_get_shader_relocs(p, &prog_data->num_relocs);

   return brw_get_program(p, &prog_data->program_size);
}