1 /* Copyright © 2011 Intel Corporation
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice (including the next
11  * paragraph) shall be included in all copies or substantial portions of the
12  * Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20  * IN THE SOFTWARE.
21  */
22 
23 #include "brw_vec4.h"
24 #include "brw_cfg.h"
25 #include "brw_eu.h"
26 #include "dev/intel_debug.h"
27 #include "util/mesa-sha1.h"
28 
29 using namespace brw;
30 
31 static void
32 generate_math1_gfx4(struct brw_codegen *p,
33                     vec4_instruction *inst,
34                     struct brw_reg dst,
35                     struct brw_reg src)
36 {
37    gfx4_math(p,
38 	     dst,
39 	     brw_math_function(inst->opcode),
40 	     inst->base_mrf,
41 	     src,
42 	     BRW_MATH_PRECISION_FULL);
43 }
44 
45 static void
46 check_gfx6_math_src_arg(struct brw_reg src)
47 {
48    /* Source swizzles are ignored. */
49    assert(!src.abs);
50    assert(!src.negate);
51    assert(src.swizzle == BRW_SWIZZLE_XYZW);
52 }
53 
54 static void
55 generate_math_gfx6(struct brw_codegen *p,
56                    vec4_instruction *inst,
57                    struct brw_reg dst,
58                    struct brw_reg src0,
59                    struct brw_reg src1)
60 {
61    /* Can't do writemask because math can't be align16. */
62    assert(dst.writemask == WRITEMASK_XYZW);
63    /* Source swizzles are ignored. */
64    check_gfx6_math_src_arg(src0);
65    if (src1.file == BRW_GENERAL_REGISTER_FILE)
66       check_gfx6_math_src_arg(src1);
67 
68    brw_set_default_access_mode(p, BRW_ALIGN_1);
69    gfx6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
70    brw_set_default_access_mode(p, BRW_ALIGN_16);
71 }
72 
73 static void
74 generate_math2_gfx4(struct brw_codegen *p,
75                     vec4_instruction *inst,
76                     struct brw_reg dst,
77                     struct brw_reg src0,
78                     struct brw_reg src1)
79 {
80    /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
81     * "Message Payload":
82     *
83     * "Operand0[7].  For the INT DIV functions, this operand is the
84     *  denominator."
85     *  ...
86     * "Operand1[7].  For the INT DIV functions, this operand is the
87     *  numerator."
88     */
89    bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
90    struct brw_reg &op0 = is_int_div ? src1 : src0;
91    struct brw_reg &op1 = is_int_div ? src0 : src1;
92 
93    brw_push_insn_state(p);
94    brw_set_default_saturate(p, false);
95    brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
96    brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
97    brw_pop_insn_state(p);
98 
99    gfx4_math(p,
100 	     dst,
101 	     brw_math_function(inst->opcode),
102 	     inst->base_mrf,
103 	     op0,
104 	     BRW_MATH_PRECISION_FULL);
105 }
106 
107 static void
108 generate_tex(struct brw_codegen *p,
109              struct brw_vue_prog_data *prog_data,
110              gl_shader_stage stage,
111              vec4_instruction *inst,
112              struct brw_reg dst,
113              struct brw_reg src,
114              struct brw_reg surface_index,
115              struct brw_reg sampler_index)
116 {
117    const struct intel_device_info *devinfo = p->devinfo;
118    int msg_type = -1;
119 
120    if (devinfo->ver >= 5) {
121       switch (inst->opcode) {
122       case SHADER_OPCODE_TEX:
123       case SHADER_OPCODE_TXL:
124 	 if (inst->shadow_compare) {
125 	    msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
126 	 } else {
127 	    msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LOD;
128 	 }
129 	 break;
130       case SHADER_OPCODE_TXD:
131          if (inst->shadow_compare) {
132             /* Gfx7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
133             assert(devinfo->verx10 == 75);
134             msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
135          } else {
136             msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
137          }
138 	 break;
139       case SHADER_OPCODE_TXF:
140 	 msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LD;
141 	 break;
142       case SHADER_OPCODE_TXF_CMS:
143          if (devinfo->ver >= 7)
144             msg_type = GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
145          else
146             msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LD;
147          break;
148       case SHADER_OPCODE_TXF_MCS:
149          assert(devinfo->ver >= 7);
150          msg_type = GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
151          break;
152       case SHADER_OPCODE_TXS:
153 	 msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
154 	 break;
155       case SHADER_OPCODE_TG4:
156          if (inst->shadow_compare) {
157             msg_type = GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
158          } else {
159             msg_type = GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
160          }
161          break;
162       case SHADER_OPCODE_TG4_OFFSET:
163          if (inst->shadow_compare) {
164             msg_type = GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
165          } else {
166             msg_type = GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
167          }
168          break;
169       case SHADER_OPCODE_SAMPLEINFO:
170          msg_type = GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
171          break;
172       default:
173 	 unreachable("should not get here: invalid vec4 texture opcode");
174       }
175    } else {
176       switch (inst->opcode) {
177       case SHADER_OPCODE_TEX:
178       case SHADER_OPCODE_TXL:
179 	 if (inst->shadow_compare) {
180 	    msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
181 	    assert(inst->mlen == 3);
182 	 } else {
183 	    msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
184 	    assert(inst->mlen == 2);
185 	 }
186 	 break;
187       case SHADER_OPCODE_TXD:
188 	 /* There is no sample_d_c message; comparisons are done manually. */
189 	 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
190 	 assert(inst->mlen == 4);
191 	 break;
192       case SHADER_OPCODE_TXF:
193 	 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
194 	 assert(inst->mlen == 2);
195 	 break;
196       case SHADER_OPCODE_TXS:
197 	 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
198 	 assert(inst->mlen == 2);
199 	 break;
200       default:
201 	 unreachable("should not get here: invalid vec4 texture opcode");
202       }
203    }
204 
205    assert(msg_type != -1);
206 
207    assert(sampler_index.type == BRW_REGISTER_TYPE_UD);
208 
209    /* Load the message header if present.  If there's a texture offset, we need
210     * to set it up explicitly and load the offset bitfield.  Otherwise, we can
211     * use an implied move from g0 to the first message register.
212     */
213    if (inst->header_size != 0) {
214       if (devinfo->ver < 6 && !inst->offset) {
215          /* Set up an implied move from g0 to the MRF. */
216          src = brw_vec8_grf(0, 0);
217       } else {
218          struct brw_reg header =
219             retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
220          uint32_t dw2 = 0;
221 
222          /* Explicitly set up the message header by copying g0 to the MRF. */
223          brw_push_insn_state(p);
224          brw_set_default_mask_control(p, BRW_MASK_DISABLE);
225          brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
226 
227          brw_set_default_access_mode(p, BRW_ALIGN_1);
228 
229          if (inst->offset)
230             /* Set the texel offset bits in DWord 2. */
231             dw2 = inst->offset;
232 
233          /* The VS, DS, and FS stages have the g0.2 payload delivered as 0,
234           * so header0.2 is 0 when g0 is copied.  The HS and GS stages do
235           * not, so we must set it to 0 to avoid setting undesirable bits
236           * in the message header.
237           */
238          if (dw2 ||
239              stage == MESA_SHADER_TESS_CTRL ||
240              stage == MESA_SHADER_GEOMETRY) {
241             brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));
242          }
243 
244          brw_adjust_sampler_state_pointer(p, header, sampler_index);
245          brw_pop_insn_state(p);
246       }
247    }
248 
249    uint32_t return_format;
250 
251    switch (dst.type) {
252    case BRW_REGISTER_TYPE_D:
253       return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
254       break;
255    case BRW_REGISTER_TYPE_UD:
256       return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
257       break;
258    default:
259       return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
260       break;
261    }
262 
263    /* Stomp the resinfo output type to UINT32.  On gens 4-5, the output type
264     * is set as part of the message descriptor.  On gfx4, the PRM seems to
265     * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
266     * later gens UINT32 is required.  Once you hit Sandy Bridge, the bit is
267     * gone from the message descriptor entirely and you just get UINT32 all
268     * the time regardless.  Since we can really only do non-UINT32 on gfx4,
269     * just stomp it to UINT32 all the time.
270     */
271    if (inst->opcode == SHADER_OPCODE_TXS)
272       return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
273 
274    if (surface_index.file == BRW_IMMEDIATE_VALUE &&
275        sampler_index.file == BRW_IMMEDIATE_VALUE) {
276       uint32_t surface = surface_index.ud;
277       uint32_t sampler = sampler_index.ud;
278 
279       brw_SAMPLE(p,
280                  dst,
281                  inst->base_mrf,
282                  src,
283                  surface,
284                  sampler % 16,
285                  msg_type,
286                  1, /* response length */
287                  inst->mlen,
288                  inst->header_size != 0,
289                  BRW_SAMPLER_SIMD_MODE_SIMD4X2,
290                  return_format);
291    } else {
292       /* Non-constant sampler index. */
293 
294       struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
295       struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
296       struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));
297 
298       brw_push_insn_state(p);
299       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
300       brw_set_default_access_mode(p, BRW_ALIGN_1);
301 
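      /* Build the extended descriptor in a0.0: the sampler index goes in
       * bits 11:8 and the surface (binding table) index in bits 7:0.  When
       * both indices come from the same register, multiplying by 0x101
       * replicates the value into both fields with a single instruction;
       * the AND below keeps only those 12 bits.
       */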
302       if (brw_regs_equal(&surface_reg, &sampler_reg)) {
303          brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
304       } else {
305          if (sampler_reg.file == BRW_IMMEDIATE_VALUE) {
306             brw_OR(p, addr, surface_reg, brw_imm_ud(sampler_reg.ud << 8));
307          } else {
308             brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
309             brw_OR(p, addr, addr, surface_reg);
310          }
311       }
312       brw_AND(p, addr, addr, brw_imm_ud(0xfff));
313 
314       brw_pop_insn_state(p);
315 
316       if (inst->base_mrf != -1)
317          gfx6_resolve_implied_move(p, &src, inst->base_mrf);
318 
319       /* dst = send(offset, a0.0 | <descriptor>) */
320       brw_send_indirect_message(
321          p, BRW_SFID_SAMPLER, dst, src, addr,
322          brw_message_desc(devinfo, inst->mlen, 1, inst->header_size) |
323          brw_sampler_desc(devinfo,
324                           0 /* surface */,
325                           0 /* sampler */,
326                           msg_type,
327                           BRW_SAMPLER_SIMD_MODE_SIMD4X2,
328                           return_format),
329          false /* EOT */);
330 
331       /* The visitor knows more than we do about the surface limit required,
332        * so it has already done the marking.
333        */
334    }
335 }
336 
337 static void
338 generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
339 {
340    brw_urb_WRITE(p,
341 		 brw_null_reg(), /* dest */
342 		 inst->base_mrf, /* starting mrf reg nr */
343 		 brw_vec8_grf(0, 0), /* src */
344                  inst->urb_write_flags,
345 		 inst->mlen,
346 		 0,		/* response len */
347 		 inst->offset,	/* urb destination offset */
348 		 BRW_URB_SWIZZLE_INTERLEAVE);
349 }
350 
351 static void
352 generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
353 {
354    struct brw_reg src = brw_message_reg(inst->base_mrf);
355    brw_urb_WRITE(p,
356                  brw_null_reg(), /* dest */
357                  inst->base_mrf, /* starting mrf reg nr */
358                  src,
359                  inst->urb_write_flags,
360                  inst->mlen,
361                  0,             /* response len */
362                  inst->offset,  /* urb destination offset */
363                  BRW_URB_SWIZZLE_INTERLEAVE);
364 }
365 
366 static void
367 generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
368 {
369    struct brw_reg src = brw_message_reg(inst->base_mrf);
370 
371    /* The temporary passed in src0 is used as the writeback register. */
372    brw_urb_WRITE(p,
373                  inst->src[0].as_brw_reg(), /* dest */
374                  inst->base_mrf, /* starting mrf reg nr */
375                  src,
376                  BRW_URB_WRITE_ALLOCATE_COMPLETE,
377                  inst->mlen,
378                  1, /* response len */
379                  inst->offset,  /* urb destination offset */
380                  BRW_URB_SWIZZLE_INTERLEAVE);
381 
382    /* Now put allocated urb handle in dst.0 */
383    brw_push_insn_state(p);
384    brw_set_default_access_mode(p, BRW_ALIGN_1);
385    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
386    brw_MOV(p, get_element_ud(inst->dst.as_brw_reg(), 0),
387            get_element_ud(inst->src[0].as_brw_reg(), 0));
388    brw_pop_insn_state(p);
389 }
390 
391 static void
392 generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
393 {
394    struct brw_reg src = brw_message_reg(inst->base_mrf);
395    brw_urb_WRITE(p,
396                  brw_null_reg(), /* dest */
397                  inst->base_mrf, /* starting mrf reg nr */
398                  src,
399                  BRW_URB_WRITE_EOT | inst->urb_write_flags,
400                  inst->mlen,
401                  0,              /* response len */
402                  0,              /* urb destination offset */
403                  BRW_URB_SWIZZLE_INTERLEAVE);
404 }
405 
406 static void
407 generate_gs_set_write_offset(struct brw_codegen *p,
408                              struct brw_reg dst,
409                              struct brw_reg src0,
410                              struct brw_reg src1)
411 {
412    /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
413     * Header: M0.3):
414     *
415     *     Slot 0 Offset. This field, after adding to the Global Offset field
416     *     in the message descriptor, specifies the offset (in 256-bit units)
417     *     from the start of the URB entry, as referenced by URB Handle 0, at
418     *     which the data will be accessed.
419     *
420     * Similar text describes DWORD M0.4, which is slot 1 offset.
421     *
422     * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
423     * of the register for geometry shader invocations 0 and 1) by the
424     * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
425     *
426     * We can do this with the following EU instruction:
427     *
428     *     mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW   { Align1 WE_all }
429     */
430    brw_push_insn_state(p);
431    brw_set_default_access_mode(p, BRW_ALIGN_1);
432    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
433    assert(p->devinfo->ver >= 7 &&
434           src1.file == BRW_IMMEDIATE_VALUE &&
435           src1.type == BRW_REGISTER_TYPE_UD &&
436           src1.ud <= USHRT_MAX);
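   /* If src0 is also an immediate, fold the multiply at compile time and
    * write the product directly; otherwise emit the mul(2) described above.
    */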
437    if (src0.file == BRW_IMMEDIATE_VALUE) {
438       brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
439               brw_imm_ud(src0.ud * src1.ud));
440    } else {
441       if (src1.file == BRW_IMMEDIATE_VALUE) {
442          src1 = brw_imm_uw(src1.ud);
443       }
444       brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
445               retype(src1, BRW_REGISTER_TYPE_UW));
446    }
447    brw_pop_insn_state(p);
448 }
449 
450 static void
451 generate_gs_set_vertex_count(struct brw_codegen *p,
452                              struct brw_reg dst,
453                              struct brw_reg src)
454 {
455    brw_push_insn_state(p);
456    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
457 
458    /* If we think of the src and dst registers as composed of 8 DWORDs each,
459     * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
460     * them to WORDs, and then pack them into DWORD 2 of dst.
461     *
462     * It's easier to get the EU to do this if we think of the src and dst
463     * registers as composed of 16 WORDS each; then, we want to pick up the
464     * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
465     * of dst.
466     *
467     * We can do that by the following EU instruction:
468     *
469     *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
470     */
471    brw_set_default_access_mode(p, BRW_ALIGN_1);
472    brw_MOV(p,
473            suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
474            stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
475 
476    brw_pop_insn_state(p);
477 }
478 
479 static void
480 generate_gs_svb_write(struct brw_codegen *p,
481                       vec4_instruction *inst,
482                       struct brw_reg dst,
483                       struct brw_reg src0,
484                       struct brw_reg src1)
485 {
486    int binding = inst->sol_binding;
487    bool final_write = inst->sol_final_write;
488 
489    brw_push_insn_state(p);
490    brw_set_default_exec_size(p, BRW_EXECUTE_4);
491    /* Copy Vertex data into M0.x */
492    brw_MOV(p, stride(dst, 4, 4, 1),
493            stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));
494    brw_pop_insn_state(p);
495 
496    brw_push_insn_state(p);
497    /* Send SVB Write */
498    brw_svb_write(p,
499                  final_write ? src1 : brw_null_reg(), /* dest == src1 */
500                  1, /* msg_reg_nr */
501                  dst, /* src0 == previous dst */
502                  BRW_GFX6_SOL_BINDING_START + binding, /* binding_table_index */
503                  final_write); /* send_commit_msg */
504 
505    /* Finally, wait for the write commit to occur so that we can proceed to
506     * other things safely.
507     *
508     * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
509     *
510     *   The write commit does not modify the destination register, but
511     *   merely clears the dependency associated with the destination
512     *   register. Thus, a simple “mov” instruction using the register as a
513     *   source is sufficient to wait for the write commit to occur.
514     */
515    if (final_write) {
516       brw_MOV(p, src1, src1);
517    }
518    brw_pop_insn_state(p);
519 }
520 
521 static void
522 generate_gs_svb_set_destination_index(struct brw_codegen *p,
523                                       vec4_instruction *inst,
524                                       struct brw_reg dst,
525                                       struct brw_reg src)
526 {
527    int vertex = inst->sol_vertex;
528    brw_push_insn_state(p);
529    brw_set_default_access_mode(p, BRW_ALIGN_1);
530    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
531    brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
532    brw_pop_insn_state(p);
533 }
534 
535 static void
536 generate_gs_set_dword_2(struct brw_codegen *p,
537                         struct brw_reg dst,
538                         struct brw_reg src)
539 {
540    brw_push_insn_state(p);
541    brw_set_default_access_mode(p, BRW_ALIGN_1);
542    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
543    brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
544    brw_pop_insn_state(p);
545 }
546 
547 static void
548 generate_gs_prepare_channel_masks(struct brw_codegen *p,
549                                   struct brw_reg dst)
550 {
551    /* We want to left shift just DWORD 4 (the x component belonging to the
552     * second geometry shader invocation) by 4 bits.  So generate the
553     * instruction:
554     *
555     *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
556     */
557    dst = suboffset(vec1(dst), 4);
558    brw_push_insn_state(p);
559    brw_set_default_access_mode(p, BRW_ALIGN_1);
560    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
561    brw_SHL(p, dst, dst, brw_imm_ud(4));
562    brw_pop_insn_state(p);
563 }
564 
565 static void
566 generate_gs_set_channel_masks(struct brw_codegen *p,
567                               struct brw_reg dst,
568                               struct brw_reg src)
569 {
570    /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
571     * Header: M0.5):
572     *
573     *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
574     *
575     *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
576     *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
577     *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
578     *        channel enable to determine the final channel enable.  For the
579     *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
580     *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
581     *        in the writeback message.  For the URB_WRITE_OWORD &
582     *        URB_WRITE_HWORD messages, when final channel enable is 1 it
583     *        indicates that Vertex 1 DATA [3] will be written to the surface.
584     *
585     *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
586     *        1: Vertex DATA [3] / Vertex 0 DATA[7] channel included
587     *
588     *     14 Vertex 1 DATA [2] Channel Mask
589     *     13 Vertex 1 DATA [1] Channel Mask
590     *     12 Vertex 1 DATA [0] Channel Mask
591     *     11 Vertex 0 DATA [3] Channel Mask
592     *     10 Vertex 0 DATA [2] Channel Mask
593     *      9 Vertex 0 DATA [1] Channel Mask
594     *      8 Vertex 0 DATA [0] Channel Mask
595     *
596     * (This is from a section of the PRM that is agnostic to the particular
597     * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
598     * geometry shader invocations 0 and 1, respectively).  Since we have the
599     * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
600     * and the enable flags for geometry shader invocation 1 in bits 7:0 of
601     * DWORD 4, we just need to OR them together and store the result in bits
602     * 15:8 of DWORD 5.
603     *
604     * It's easier to get the EU to do this if we think of the src and dst
605     * registers as composed of 32 bytes each; then, we want to pick up the
606     * contents of bytes 0 and 16 from src, OR them together, and store them in
607     * byte 21.
608     *
609     * We can do that by the following EU instruction:
610     *
611     *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB { align1 WE_all }
612     *
613     * Note: this relies on the source register having zeros in (a) bits 7:4 of
614     * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
615     * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
616     * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
617     * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
618     * contain valid channel mask values (which are in the range 0x0-0xf).
619     */
620    dst = retype(dst, BRW_REGISTER_TYPE_UB);
621    src = retype(src, BRW_REGISTER_TYPE_UB);
622    brw_push_insn_state(p);
623    brw_set_default_access_mode(p, BRW_ALIGN_1);
624    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
625    brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
626    brw_pop_insn_state(p);
627 }
628 
629 static void
630 generate_gs_get_instance_id(struct brw_codegen *p,
631                             struct brw_reg dst)
632 {
633    /* We want to right shift R0.0 & R0.1 by GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT
634     * and store into dst.0 & dst.4. So generate the instruction:
635     *
636     *     shr(8) dst<1> R0<1,4,0> GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
637     */
638    brw_push_insn_state(p);
639    brw_set_default_access_mode(p, BRW_ALIGN_1);
640    dst = retype(dst, BRW_REGISTER_TYPE_UD);
641    struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
642    brw_SHR(p, dst, stride(r0, 1, 4, 0),
643            brw_imm_ud(GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
644    brw_pop_insn_state(p);
645 }
646 
647 static void
648 generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
649                                    struct brw_reg dst,
650                                    struct brw_reg src0,
651                                    struct brw_reg src1,
652                                    struct brw_reg src2)
653 {
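   /* Pack src0 into the high 16 bits and src1 into the low 16 bits of
    * dst.0, using src2 as a scratch register for the masked src1 value.
    */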
654    brw_push_insn_state(p);
655    brw_set_default_access_mode(p, BRW_ALIGN_1);
656    /* Save src0 data in 16:31 bits of dst.0 */
657    brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
658            brw_imm_ud(0xffffu));
659    brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
660    /* Save src1 data in 0:15 bits of dst.0 */
661    brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
662            brw_imm_ud(0xffffu));
663    brw_OR(p, suboffset(vec1(dst), 0),
664           suboffset(vec1(dst), 0),
665           suboffset(vec1(src2), 0));
666    brw_pop_insn_state(p);
667 }
668 
669 static void
670 generate_gs_ff_sync(struct brw_codegen *p,
671                     vec4_instruction *inst,
672                     struct brw_reg dst,
673                     struct brw_reg src0,
674                     struct brw_reg src1)
675 {
676    /* This opcode uses an implied MRF register for:
677     *  - the header of the ff_sync message, which is expected to have been
678     *    initialized to r0 before we get here.
679     *  - the destination where we will write the allocated URB handle.
680     */
681    struct brw_reg header =
682       retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
683 
684    /* Overwrite dword 0 of the header (SO vertices to write) and
685     * dword 1 (number of primitives written).
686     */
687    brw_push_insn_state(p);
688    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
689    brw_set_default_access_mode(p, BRW_ALIGN_1);
690    brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
691    brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
692    brw_pop_insn_state(p);
693 
694    /* Allocate URB handle in dst */
695    brw_ff_sync(p,
696                dst,
697                0,
698                header,
699                1, /* allocate */
700                1, /* response length */
701                0 /* eot */);
702 
703    /* Now put allocated urb handle in header.0 */
704    brw_push_insn_state(p);
705    brw_set_default_access_mode(p, BRW_ALIGN_1);
706    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
707    brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));
708 
709    /* src1 is not an immediate when we use transform feedback */
710    if (src1.file != BRW_IMMEDIATE_VALUE) {
711       brw_set_default_exec_size(p, BRW_EXECUTE_4);
712       brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));
713    }
714 
715    brw_pop_insn_state(p);
716 }
717 
718 static void
719 generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
720 {
721    /* In gfx6, PrimitiveID is delivered in R0.1 of the payload */
722    struct brw_reg src = brw_vec8_grf(0, 0);
723    brw_push_insn_state(p);
724    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
725    brw_set_default_access_mode(p, BRW_ALIGN_1);
726    brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
727    brw_pop_insn_state(p);
728 }
729 
730 static void
731 generate_tcs_get_instance_id(struct brw_codegen *p, struct brw_reg dst)
732 {
733    const struct intel_device_info *devinfo = p->devinfo;
734    const bool ivb = devinfo->platform == INTEL_PLATFORM_IVB ||
735                     devinfo->platform == INTEL_PLATFORM_BYT;
736 
737    /* "Instance Count" comes as part of the payload in r0.2 bits 23:17.
738     *
739     * Since we operate in SIMD4x2 mode, we need to run half as many threads
740     * as necessary.  So we assign (2i + 1, 2i) as the thread counts.  We
741     * shift right by one less to accomplish the multiplication by two.
742     */
743    dst = retype(dst, BRW_REGISTER_TYPE_UD);
744    struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
745 
746    brw_push_insn_state(p);
747    brw_set_default_access_mode(p, BRW_ALIGN_1);
748 
749    const int mask = ivb ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
750    const int shift = ivb ? 16 : 17;
751 
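   /* Shifting right by (shift - 1) extracts the payload field and doubles it
    * in one step, giving 2i in dst.0; the ADD below produces 2i + 1 in dst.4
    * for the second half of the SIMD4x2 pair.
    */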
752    brw_AND(p, get_element_ud(dst, 0), get_element_ud(r0, 2), brw_imm_ud(mask));
753    brw_SHR(p, get_element_ud(dst, 0), get_element_ud(dst, 0),
754            brw_imm_ud(shift - 1));
755    brw_ADD(p, get_element_ud(dst, 4), get_element_ud(dst, 0), brw_imm_ud(1));
756 
757    brw_pop_insn_state(p);
758 }
759 
760 static void
761 generate_tcs_urb_write(struct brw_codegen *p,
762                        vec4_instruction *inst,
763                        struct brw_reg urb_header)
764 {
765    const struct intel_device_info *devinfo = p->devinfo;
766 
767    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
768    brw_set_dest(p, send, brw_null_reg());
769    brw_set_src0(p, send, urb_header);
770    brw_set_desc(p, send, brw_message_desc(devinfo, inst->mlen, 0, true));
771 
772    brw_inst_set_sfid(devinfo, send, BRW_SFID_URB);
773    brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_WRITE_OWORD);
774    brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
775    if (inst->urb_write_flags & BRW_URB_WRITE_EOT) {
776       brw_inst_set_eot(devinfo, send, 1);
777    } else {
778       brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
779       brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
780    }
781 
782    /* what happens to swizzles? */
783 }
784 
785 
786 static void
787 generate_tcs_input_urb_offsets(struct brw_codegen *p,
788                                struct brw_reg dst,
789                                struct brw_reg vertex,
790                                struct brw_reg offset)
791 {
792    /* Generates an URB read/write message header for HS/DS operation.
793     * Inputs are a vertex index, and a byte offset from the beginning of
794     * the vertex. */
795 
796    /* If `vertex` is not an immediate, we clobber a0.0 */
797 
798    assert(vertex.file == BRW_IMMEDIATE_VALUE || vertex.file == BRW_GENERAL_REGISTER_FILE);
799    assert(vertex.type == BRW_REGISTER_TYPE_UD || vertex.type == BRW_REGISTER_TYPE_D);
800 
801    assert(dst.file == BRW_GENERAL_REGISTER_FILE);
802 
803    brw_push_insn_state(p);
804    brw_set_default_access_mode(p, BRW_ALIGN_1);
805    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
806    brw_MOV(p, dst, brw_imm_ud(0));
807 
808    /* m0.5 bits 8-15 are channel enables */
809    brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));
810 
811    /* m0.0-0.1: URB handles */
812    if (vertex.file == BRW_IMMEDIATE_VALUE) {
813       uint32_t vertex_index = vertex.ud;
814       struct brw_reg index_reg = brw_vec1_grf(
815             1 + (vertex_index >> 3), vertex_index & 7);
816 
817       brw_MOV(p, vec2(get_element_ud(dst, 0)),
818               retype(index_reg, BRW_REGISTER_TYPE_UD));
819    } else {
820       /* Use indirect addressing.  ICP Handles are DWords (single channels
821        * of a register) and start at g1.0.
822        *
823        * In order to start our region at g1.0, we add 8 to the vertex index,
824        * effectively skipping over the 8 channels in g0.0.  This gives us a
825        * DWord offset to the ICP Handle.
826        *
827        * Indirect addressing works in terms of bytes, so we then multiply
828        * the DWord offset by 4 (by shifting left by 2).
829        */
830       struct brw_reg addr = brw_address_reg(0);
831 
832       /* bottom half: m0.0 = g[1.0 + vertex.0]UD */
833       brw_ADD(p, addr, retype(get_element_ud(vertex, 0), BRW_REGISTER_TYPE_UW),
834               brw_imm_uw(0x8));
835       brw_SHL(p, addr, addr, brw_imm_uw(2));
836       brw_MOV(p, get_element_ud(dst, 0), deref_1ud(brw_indirect(0, 0), 0));
837 
838       /* top half: m0.1 = g[1.0 + vertex.4]UD */
839       brw_ADD(p, addr, retype(get_element_ud(vertex, 4), BRW_REGISTER_TYPE_UW),
840               brw_imm_uw(0x8));
841       brw_SHL(p, addr, addr, brw_imm_uw(2));
842       brw_MOV(p, get_element_ud(dst, 1), deref_1ud(brw_indirect(0, 0), 0));
843    }
844 
845    /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
846    if (offset.file != ARF)
847       brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));
848 
849    brw_pop_insn_state(p);
850 }
851 
852 
853 static void
854 generate_tcs_output_urb_offsets(struct brw_codegen *p,
855                                 struct brw_reg dst,
856                                 struct brw_reg write_mask,
857                                 struct brw_reg offset)
858 {
859    /* Generates an URB read/write message header for HS/DS operation, for the patch URB entry. */
860    assert(dst.file == BRW_GENERAL_REGISTER_FILE || dst.file == BRW_MESSAGE_REGISTER_FILE);
861 
862    assert(write_mask.file == BRW_IMMEDIATE_VALUE);
863    assert(write_mask.type == BRW_REGISTER_TYPE_UD);
864 
865    brw_push_insn_state(p);
866 
867    brw_set_default_access_mode(p, BRW_ALIGN_1);
868    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
869    brw_MOV(p, dst, brw_imm_ud(0));
870 
871    unsigned mask = write_mask.ud;
872 
873    /* m0.5 bits 15:12 and 11:8 are channel enables */
874    brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud((mask << 8) | (mask << 12)));
875 
876    /* HS patch URB handle is delivered in r0.0 */
877    struct brw_reg urb_handle = brw_vec1_grf(0, 0);
878 
879    /* m0.0-0.1: URB handles */
880    brw_MOV(p, vec2(get_element_ud(dst, 0)),
881            retype(urb_handle, BRW_REGISTER_TYPE_UD));
882 
883    /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
884    if (offset.file != ARF)
885       brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));
886 
887    brw_pop_insn_state(p);
888 }
889 
890 static void
891 generate_tes_create_input_read_header(struct brw_codegen *p,
892                                       struct brw_reg dst)
893 {
894    brw_push_insn_state(p);
895    brw_set_default_access_mode(p, BRW_ALIGN_1);
896    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
897 
898    /* Initialize the register to 0 */
899    brw_MOV(p, dst, brw_imm_ud(0));
900 
901    /* Enable all the channels in m0.5 bits 15:8 */
902    brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));
903 
904    /* Copy g1.3 (the patch URB handle) to m0.0 and m0.1.  For safety,
905     * mask out irrelevant "Reserved" bits, as they're not marked MBZ.
906     */
907    brw_AND(p, vec2(get_element_ud(dst, 0)),
908            retype(brw_vec1_grf(1, 3), BRW_REGISTER_TYPE_UD),
909            brw_imm_ud(0x1fff));
910    brw_pop_insn_state(p);
911 }
912 
913 static void
914 generate_tes_add_indirect_urb_offset(struct brw_codegen *p,
915                                      struct brw_reg dst,
916                                      struct brw_reg header,
917                                      struct brw_reg offset)
918 {
919    brw_push_insn_state(p);
920    brw_set_default_access_mode(p, BRW_ALIGN_1);
921    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
922 
923    brw_MOV(p, dst, header);
924 
925    /* Uniforms will have a stride <0;4,1>, and we need to convert to <0;1,0>.
926     * Other values get <4;1,0>.
927     */
928    struct brw_reg restrided_offset;
929    if (offset.vstride == BRW_VERTICAL_STRIDE_0 &&
930        offset.width == BRW_WIDTH_4 &&
931        offset.hstride == BRW_HORIZONTAL_STRIDE_1) {
932       restrided_offset = stride(offset, 0, 1, 0);
933    } else {
934       restrided_offset = stride(offset, 4, 1, 0);
935    }
936 
937    /* m0.3-0.4: 128-bit-granular offsets into the URB from the handles */
938    brw_MOV(p, vec2(get_element_ud(dst, 3)), restrided_offset);
939 
940    brw_pop_insn_state(p);
941 }
942 
943 static void
944 generate_vec4_urb_read(struct brw_codegen *p,
945                        vec4_instruction *inst,
946                        struct brw_reg dst,
947                        struct brw_reg header)
948 {
949    const struct intel_device_info *devinfo = p->devinfo;
950 
951    assert(header.file == BRW_GENERAL_REGISTER_FILE);
952    assert(header.type == BRW_REGISTER_TYPE_UD);
953 
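   /* Single OWord interleaved URB read: one payload register (the header
    * passed in) and one register of writeback data, with the per-slot
    * offset enabled and the global offset taken from the instruction.
    */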
954    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
955    brw_set_dest(p, send, dst);
956    brw_set_src0(p, send, header);
957 
958    brw_set_desc(p, send, brw_message_desc(devinfo, 1, 1, true));
959 
960    brw_inst_set_sfid(devinfo, send, BRW_SFID_URB);
961    brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
962    brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
963    brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
964 
965    brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
966 }
967 
968 static void
969 generate_tcs_release_input(struct brw_codegen *p,
970                            struct brw_reg header,
971                            struct brw_reg vertex,
972                            struct brw_reg is_unpaired)
973 {
974    const struct intel_device_info *devinfo = p->devinfo;
975 
976    assert(vertex.file == BRW_IMMEDIATE_VALUE);
977    assert(vertex.type == BRW_REGISTER_TYPE_UD);
978 
979    /* m0.0-0.1: URB handles */
980    struct brw_reg urb_handles =
981       retype(brw_vec2_grf(1 + (vertex.ud >> 3), vertex.ud & 7),
982              BRW_REGISTER_TYPE_UD);
983 
984    brw_push_insn_state(p);
985    brw_set_default_access_mode(p, BRW_ALIGN_1);
986    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
987    brw_MOV(p, header, brw_imm_ud(0));
988    brw_MOV(p, vec2(get_element_ud(header, 0)), urb_handles);
989    brw_pop_insn_state(p);
990 
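   /* The read itself returns no data (null destination, zero response
    * length); the "complete" bit is what releases the input vertex handles
    * back to the URB.
    */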
991    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
992    brw_set_dest(p, send, brw_null_reg());
993    brw_set_src0(p, send, header);
994    brw_set_desc(p, send, brw_message_desc(devinfo, 1, 0, true));
995 
996    brw_inst_set_sfid(devinfo, send, BRW_SFID_URB);
997    brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
998    brw_inst_set_urb_complete(devinfo, send, 1);
999    brw_inst_set_urb_swizzle_control(devinfo, send, is_unpaired.ud ?
1000                                     BRW_URB_SWIZZLE_NONE :
1001                                     BRW_URB_SWIZZLE_INTERLEAVE);
1002 }
1003 
1004 static void
1005 generate_tcs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
1006 {
1007    struct brw_reg header = brw_message_reg(inst->base_mrf);
1008 
1009    brw_push_insn_state(p);
1010    brw_set_default_access_mode(p, BRW_ALIGN_1);
1011    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1012    brw_MOV(p, header, brw_imm_ud(0));
1013    brw_MOV(p, get_element_ud(header, 5), brw_imm_ud(WRITEMASK_X << 8));
1014    brw_MOV(p, get_element_ud(header, 0),
1015            retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
1016    brw_MOV(p, brw_message_reg(inst->base_mrf + 1), brw_imm_ud(0u));
1017    brw_pop_insn_state(p);
1018 
1019    brw_urb_WRITE(p,
1020                  brw_null_reg(), /* dest */
1021                  inst->base_mrf, /* starting mrf reg nr */
1022                  header,
1023                  BRW_URB_WRITE_EOT | BRW_URB_WRITE_OWORD |
1024                  BRW_URB_WRITE_USE_CHANNEL_MASKS,
1025                  inst->mlen,
1026                  0,              /* response len */
1027                  0,              /* urb destination offset */
1028                  0);
1029 }
1030 
1031 static void
1032 generate_tes_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
1033 {
1034    brw_push_insn_state(p);
1035    brw_set_default_access_mode(p, BRW_ALIGN_1);
1036    brw_MOV(p, dst, retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_D));
1037    brw_pop_insn_state(p);
1038 }
1039 
1040 static void
1041 generate_tcs_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
1042 {
1043    brw_push_insn_state(p);
1044    brw_set_default_access_mode(p, BRW_ALIGN_1);
1045    brw_MOV(p, dst, retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
1046    brw_pop_insn_state(p);
1047 }
1048 
1049 static void
1050 generate_tcs_create_barrier_header(struct brw_codegen *p,
1051                                    struct brw_vue_prog_data *prog_data,
1052                                    struct brw_reg dst)
1053 {
1054    const struct intel_device_info *devinfo = p->devinfo;
1055    const bool ivb = devinfo->platform == INTEL_PLATFORM_IVB ||
1056                     devinfo->platform == INTEL_PLATFORM_BYT;
1057    struct brw_reg m0_2 = get_element_ud(dst, 2);
1058    unsigned instances = ((struct brw_tcs_prog_data *) prog_data)->instances;
1059 
1060    brw_push_insn_state(p);
1061    brw_set_default_access_mode(p, BRW_ALIGN_1);
1062    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1063 
1064    /* Zero the message header */
1065    brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
1066 
1067    /* Copy "Barrier ID" from r0.2, bits 16:13 (Gfx7.5+) or 15:12 (Gfx7) */
1068    brw_AND(p, m0_2,
1069            retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
1070            brw_imm_ud(ivb ? INTEL_MASK(15, 12) : INTEL_MASK(16, 13)));
1071 
1072    /* Shift it up to bits 27:24. */
1073    brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(ivb ? 12 : 11));
1074 
1075    /* Set the Barrier Count and the enable bit */
1076    brw_OR(p, m0_2, m0_2, brw_imm_ud(instances << 9 | (1 << 15)));
1077 
1078    brw_pop_insn_state(p);
1079 }
1080 
1081 static void
1082 generate_oword_dual_block_offsets(struct brw_codegen *p,
1083                                   struct brw_reg m1,
1084                                   struct brw_reg index)
1085 {
1086    int second_vertex_offset;
1087 
1088    if (p->devinfo->ver >= 6)
1089       second_vertex_offset = 1;
1090    else
1091       second_vertex_offset = 16;
1092 
1093    m1 = retype(m1, BRW_REGISTER_TYPE_D);
1094 
1095    /* Set up M1 (message payload).  Only the block offsets in M1.0 and
1096     * M1.4 are used, and the rest are ignored.
1097     */
1098    struct brw_reg m1_0 = suboffset(vec1(m1), 0);
1099    struct brw_reg m1_4 = suboffset(vec1(m1), 4);
1100    struct brw_reg index_0 = suboffset(vec1(index), 0);
1101    struct brw_reg index_4 = suboffset(vec1(index), 4);
1102 
1103    brw_push_insn_state(p);
1104    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1105    brw_set_default_access_mode(p, BRW_ALIGN_1);
1106 
1107    brw_MOV(p, m1_0, index_0);
1108 
1109    if (index.file == BRW_IMMEDIATE_VALUE) {
1110       index_4.ud += second_vertex_offset;
1111       brw_MOV(p, m1_4, index_4);
1112    } else {
1113       brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
1114    }
1115 
1116    brw_pop_insn_state(p);
1117 }
1118 
1119 static void
1120 generate_unpack_flags(struct brw_codegen *p,
1121                       struct brw_reg dst)
1122 {
1123    brw_push_insn_state(p);
1124    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1125    brw_set_default_access_mode(p, BRW_ALIGN_1);
1126 
1127    struct brw_reg flags = brw_flag_reg(0, 0);
1128    struct brw_reg dst_0 = suboffset(vec1(dst), 0);
1129    struct brw_reg dst_4 = suboffset(vec1(dst), 4);
1130 
1131    brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
1132    brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
1133    brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));
1134 
1135    brw_pop_insn_state(p);
1136 }
1137 
1138 static void
1139 generate_scratch_read(struct brw_codegen *p,
1140                       vec4_instruction *inst,
1141                       struct brw_reg dst,
1142                       struct brw_reg index)
1143 {
1144    const struct intel_device_info *devinfo = p->devinfo;
1145    struct brw_reg header = brw_vec8_grf(0, 0);
1146 
1147    gfx6_resolve_implied_move(p, &header, inst->base_mrf);
1148 
1149    generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
1150 				     index);
1151 
1152    uint32_t msg_type;
1153 
1154    if (devinfo->ver >= 6)
1155       msg_type = GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1156    else if (devinfo->verx10 >= 45)
1157       msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1158    else
1159       msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1160 
1161    const unsigned target_cache =
1162       devinfo->ver >= 7 ? GFX7_SFID_DATAPORT_DATA_CACHE :
1163       devinfo->ver >= 6 ? GFX6_SFID_DATAPORT_RENDER_CACHE :
1164       BRW_SFID_DATAPORT_READ;
1165 
1166    /* Each of the 8 channel enables is considered for whether each
1167     * dword is written.
1168     */
1169    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1170    brw_inst_set_sfid(devinfo, send, target_cache);
1171    brw_set_dest(p, send, dst);
1172    brw_set_src0(p, send, header);
1173    if (devinfo->ver < 6)
1174       brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
1175    brw_set_desc(p, send,
1176                 brw_message_desc(devinfo, 2, 1, true) |
1177                 brw_dp_read_desc(devinfo,
1178                                  brw_scratch_surface_idx(p),
1179                                  BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
1180                                  msg_type, BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
1181 }
1182 
1183 static void
1184 generate_scratch_write(struct brw_codegen *p,
1185                        vec4_instruction *inst,
1186                        struct brw_reg dst,
1187                        struct brw_reg src,
1188                        struct brw_reg index)
1189 {
1190    const struct intel_device_info *devinfo = p->devinfo;
1191    const unsigned target_cache =
1192       (devinfo->ver >= 7 ? GFX7_SFID_DATAPORT_DATA_CACHE :
1193        devinfo->ver >= 6 ? GFX6_SFID_DATAPORT_RENDER_CACHE :
1194        BRW_SFID_DATAPORT_WRITE);
1195    struct brw_reg header = brw_vec8_grf(0, 0);
1196    bool write_commit;
1197 
1198    /* If the instruction is predicated, we'll predicate the send, not
1199     * the header setup.
1200     */
1201    brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1202 
1203    gfx6_resolve_implied_move(p, &header, inst->base_mrf);
1204 
1205    generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
1206 				     index);
1207 
1208    brw_MOV(p,
1209 	   retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
1210 	   retype(src, BRW_REGISTER_TYPE_D));
1211 
1212    uint32_t msg_type;
1213 
1214    if (devinfo->ver >= 7)
1215       msg_type = GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
1216    else if (devinfo->ver == 6)
1217       msg_type = GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
1218    else
1219       msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
1220 
1221    brw_set_default_predicate_control(p, inst->predicate);
1222 
1223    /* Pre-gfx6, we have to specify write commits to ensure ordering
1224     * between reads and writes within a thread.  Afterwards, that's
1225     * guaranteed and write commits only matter for inter-thread
1226     * synchronization.
1227     */
1228    if (devinfo->ver >= 6) {
1229       write_commit = false;
1230    } else {
1231       /* The visitor set up our destination register to be g0.  This
1232        * means that when the next read comes along, we will end up
1233        * reading from g0 and causing a block on the write commit.  For
1234        * write-after-read, we are relying on the value of the previous
1235        * read being used (and thus blocking on completion) before our
1236        * write is executed.  This means we have to be careful in
1237        * instruction scheduling to not violate this assumption.
1238        */
1239       write_commit = true;
1240    }
1241 
1242    /* Each of the 8 channel enables is considered for whether each
1243     * dword is written.
1244     */
1245    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1246    brw_inst_set_sfid(p->devinfo, send, target_cache);
1247    brw_set_dest(p, send, dst);
1248    brw_set_src0(p, send, header);
1249    if (devinfo->ver < 6)
1250       brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
1251    brw_set_desc(p, send,
1252                 brw_message_desc(devinfo, 3, write_commit, true) |
1253                 brw_dp_write_desc(devinfo,
1254                                   brw_scratch_surface_idx(p),
1255                                   BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
1256                                   msg_type,
1257                                   write_commit));
1258 }
1259 
1260 static void
1261 generate_pull_constant_load(struct brw_codegen *p,
1262                             vec4_instruction *inst,
1263                             struct brw_reg dst,
1264                             struct brw_reg index,
1265                             struct brw_reg offset)
1266 {
1267    const struct intel_device_info *devinfo = p->devinfo;
1268    const unsigned target_cache =
1269       (devinfo->ver >= 6 ? GFX6_SFID_DATAPORT_SAMPLER_CACHE :
1270        BRW_SFID_DATAPORT_READ);
1271    assert(index.file == BRW_IMMEDIATE_VALUE &&
1272 	  index.type == BRW_REGISTER_TYPE_UD);
1273    uint32_t surf_index = index.ud;
1274 
1275    struct brw_reg header = brw_vec8_grf(0, 0);
1276 
1277    gfx6_resolve_implied_move(p, &header, inst->base_mrf);
1278 
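   /* Gfx6+ OWord dual block reads take the offset in OWords (16 bytes), so
    * convert from the byte offset here; earlier gens pass the offset through
    * unchanged.
    */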
1279    if (devinfo->ver >= 6) {
1280       if (offset.file == BRW_IMMEDIATE_VALUE) {
1281          brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
1282                            BRW_REGISTER_TYPE_D),
1283                  brw_imm_d(offset.ud >> 4));
1284       } else {
1285          brw_SHR(p, retype(brw_message_reg(inst->base_mrf + 1),
1286                            BRW_REGISTER_TYPE_D),
1287                  offset, brw_imm_d(4));
1288       }
1289    } else {
1290       brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
1291                         BRW_REGISTER_TYPE_D),
1292               offset);
1293    }
1294 
1295    uint32_t msg_type;
1296 
1297    if (devinfo->ver >= 6)
1298       msg_type = GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1299    else if (devinfo->verx10 >= 45)
1300       msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1301    else
1302       msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1303 
1304    /* Each of the 8 channel enables is considered for whether each
1305     * dword is written.
1306     */
1307    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1308    brw_inst_set_sfid(devinfo, send, target_cache);
1309    brw_set_dest(p, send, dst);
1310    brw_set_src0(p, send, header);
1311    if (devinfo->ver < 6)
1312       brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
1313    brw_set_desc(p, send,
1314                 brw_message_desc(devinfo, 2, 1, true) |
1315                 brw_dp_read_desc(devinfo, surf_index,
1316                                  BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
1317                                  msg_type,
1318                                  BRW_DATAPORT_READ_TARGET_DATA_CACHE));
1319 }
1320 
1321 static void
1322 generate_get_buffer_size(struct brw_codegen *p,
1323                          vec4_instruction *inst,
1324                          struct brw_reg dst,
1325                          struct brw_reg src,
1326                          struct brw_reg surf_index)
1327 {
1328    assert(p->devinfo->ver >= 7);
1329    assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
1330           surf_index.file == BRW_IMMEDIATE_VALUE);
1331 
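   /* Buffer sizes are queried with the sampler's resinfo message; the
    * SIMD4x2 response returns the surface dimensions as signed integers.
    */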
1332    brw_SAMPLE(p,
1333               dst,
1334               inst->base_mrf,
1335               src,
1336               surf_index.ud,
1337               0,
1338               GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
1339               1, /* response length */
1340               inst->mlen,
1341               inst->header_size > 0,
1342               BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1343               BRW_SAMPLER_RETURN_FORMAT_SINT32);
1344 }
1345 
1346 static void
1347 generate_pull_constant_load_gfx7(struct brw_codegen *p,
1348                                  vec4_instruction *inst,
1349                                  struct brw_reg dst,
1350                                  struct brw_reg surf_index,
1351                                  struct brw_reg offset)
1352 {
1353    const struct intel_device_info *devinfo = p->devinfo;
1354    assert(surf_index.type == BRW_REGISTER_TYPE_UD);
1355 
1356    if (surf_index.file == BRW_IMMEDIATE_VALUE) {
1357 
1358       brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
1359       brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
1360       brw_set_dest(p, insn, dst);
1361       brw_set_src0(p, insn, offset);
1362       brw_set_desc(p, insn,
1363                    brw_message_desc(devinfo, inst->mlen, 1, inst->header_size) |
1364                    brw_sampler_desc(devinfo, surf_index.ud,
1365                                     0, /* LD message ignores sampler unit */
1366                                     GFX5_SAMPLER_MESSAGE_SAMPLE_LD,
1367                                     BRW_SAMPLER_SIMD_MODE_SIMD4X2, 0));
1368    } else {
1369 
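      /* The surface index is not an immediate, so the message descriptor has
       * to be built at run time: mask the index into a0.0 and issue an
       * indirect SEND that ORs it with the static part of the descriptor.
       */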
1370       struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
1371 
1372       brw_push_insn_state(p);
1373       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1374       brw_set_default_access_mode(p, BRW_ALIGN_1);
1375 
1376       /* a0.0 = surf_index & 0xff */
1377       brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
1378       brw_inst_set_exec_size(devinfo, insn_and, BRW_EXECUTE_1);
1379       brw_set_dest(p, insn_and, addr);
1380       brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
1381       brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
1382 
1383       brw_pop_insn_state(p);
1384 
1385       /* dst = send(offset, a0.0 | <descriptor>) */
1386       brw_send_indirect_message(
1387          p, BRW_SFID_SAMPLER, dst, offset, addr,
1388          brw_message_desc(devinfo, inst->mlen, 1, inst->header_size) |
1389          brw_sampler_desc(devinfo,
1390                           0 /* surface */,
1391                           0 /* sampler */,
1392                           GFX5_SAMPLER_MESSAGE_SAMPLE_LD,
1393                           BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1394                           0),
1395          false /* EOT */);
1396    }
1397 }
1398 
1399 static void
1400 generate_mov_indirect(struct brw_codegen *p,
1401                       vec4_instruction *,
1402                       struct brw_reg dst, struct brw_reg reg,
1403                       struct brw_reg indirect)
1404 {
1405    assert(indirect.type == BRW_REGISTER_TYPE_UD);
1406    assert(p->devinfo->ver >= 6);
1407 
1408    unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr * (REG_SIZE / 2);
1409 
1410    /* This instruction acts in align1 mode */
1411    assert(dst.writemask == WRITEMASK_XYZW);
1412 
1413    if (indirect.file == BRW_IMMEDIATE_VALUE) {
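      /* Fold the immediate offset straight into the source region: select
       * the GRF and 16-byte half it lands in, and bump each swizzle channel
       * by the remaining dword offset.
       */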
1414       imm_byte_offset += indirect.ud;
1415 
1416       reg.nr = imm_byte_offset / REG_SIZE;
1417       reg.subnr = (imm_byte_offset / (REG_SIZE / 2)) % 2;
1418       unsigned shift = (imm_byte_offset / 4) % 4;
1419       reg.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);
1420 
1421       brw_MOV(p, dst, reg);
1422    } else {
1423       brw_push_insn_state(p);
1424       brw_set_default_access_mode(p, BRW_ALIGN_1);
1425       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1426 
1427       struct brw_reg addr = vec8(brw_address_reg(0));
1428 
1429       /* We need to move the indirect value into the address register.  To
1430        * keep the result sensible we want to respect at least the X component
1431        * of the swizzle, which means converting the subnr (probably 0) to an
1432        * align1 subnr and adding in the swizzle.
1433        */
1434       assert(brw_is_single_value_swizzle(indirect.swizzle));
1435       indirect.subnr = (indirect.subnr * 4 + BRW_GET_SWZ(indirect.swizzle, 0));
1436 
1437       /* We then use a region of <8,4,0>:uw to pick off the first 2 bytes of
1438        * the indirect and splat it out to all four channels of the given half
1439        * of a0.
1440        */
1441       indirect.subnr *= 2;
1442       indirect = stride(retype(indirect, BRW_REGISTER_TYPE_UW), 8, 4, 0);
1443       brw_ADD(p, addr, indirect, brw_imm_uw(imm_byte_offset));
1444 
1445       /* Now we need to incorporate the swizzle from the source register */
1446       if (reg.swizzle != BRW_SWIZZLE_XXXX) {
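         /* Pack the byte offset of each swizzle channel (channel * 4) into
          * consecutive nibbles of a UV vector immediate, repeated for both
          * halves of a0, and add it to the per-channel base addresses.
          */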
1447          uint32_t uv_swiz = BRW_GET_SWZ(reg.swizzle, 0) << 2 |
1448                             BRW_GET_SWZ(reg.swizzle, 1) << 6 |
1449                             BRW_GET_SWZ(reg.swizzle, 2) << 10 |
1450                             BRW_GET_SWZ(reg.swizzle, 3) << 14;
1451          uv_swiz |= uv_swiz << 16;
1452 
1453          brw_ADD(p, addr, addr, brw_imm_uv(uv_swiz));
1454       }
1455 
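      /* Finally, a VxH-indirect MOV: each channel reads from the GRF byte
       * address held in its own a0 subregister.
       */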
1456       brw_MOV(p, dst, retype(brw_VxH_indirect(0, 0), reg.type));
1457 
1458       brw_pop_insn_state(p);
1459    }
1460 }
1461 
1462 static void
1463 generate_zero_oob_push_regs(struct brw_codegen *p,
1464                             struct brw_stage_prog_data *prog_data,
1465                             struct brw_reg scratch,
1466                             struct brw_reg bit_mask_in)
1467 {
1468    const uint64_t want_zero = prog_data->zero_push_reg;
1469    assert(want_zero);
1470 
1471    assert(bit_mask_in.file == BRW_GENERAL_REGISTER_FILE);
1472    assert(BRW_GET_SWZ(bit_mask_in.swizzle, 1) ==
1473           BRW_GET_SWZ(bit_mask_in.swizzle, 0) + 1);
1474    bit_mask_in.subnr += BRW_GET_SWZ(bit_mask_in.swizzle, 0) * 4;
1475    bit_mask_in.type = BRW_REGISTER_TYPE_W;
1476 
1477    /* Scratch should be 3 registers in the GRF */
1478    assert(scratch.file == BRW_GENERAL_REGISTER_FILE);
1479    scratch = vec8(scratch);
1480    struct brw_reg mask_w16 = retype(scratch, BRW_REGISTER_TYPE_W);
1481    struct brw_reg mask_d16 = retype(byte_offset(scratch, REG_SIZE),
1482                                     BRW_REGISTER_TYPE_D);
1483 
1484    brw_push_insn_state(p);
1485    brw_set_default_access_mode(p, BRW_ALIGN_1);
1486    brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1487 
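   /* Roughly: for every group of 16 mask bits, build a 16-lane word vector
    * in which lane i has bit i sitting in its sign position (the two SHLs),
    * then arithmetic-shift right by 15 to splat that bit across a full
    * dword (the ASR into mask_d16).  Each out-of-bounds push register is
    * then ANDed with its all-zeros/all-ones mask below.
    */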
1488    for (unsigned i = 0; i < 64; i++) {
1489       if (i % 16 == 0 && (want_zero & BITFIELD64_RANGE(i, 16))) {
1490          brw_set_default_exec_size(p, BRW_EXECUTE_8);
1491          brw_SHL(p, suboffset(mask_w16, 8),
1492                     vec1(byte_offset(bit_mask_in, i / 8)),
1493                     brw_imm_v(0x01234567));
1494          brw_SHL(p, mask_w16, suboffset(mask_w16, 8), brw_imm_w(8));
1495 
1496          brw_set_default_exec_size(p, BRW_EXECUTE_16);
1497          brw_ASR(p, mask_d16, mask_w16, brw_imm_w(15));
1498       }
1499 
1500       if (want_zero & BITFIELD64_BIT(i)) {
1501          unsigned push_start = prog_data->dispatch_grf_start_reg;
1502          struct brw_reg push_reg =
1503             retype(brw_vec8_grf(push_start + i, 0), BRW_REGISTER_TYPE_D);
1504 
1505          brw_set_default_exec_size(p, BRW_EXECUTE_8);
1506          brw_AND(p, push_reg, push_reg, vec1(suboffset(mask_d16, i)));
1507       }
1508    }
1509 
1510    brw_pop_insn_state(p);
1511 }
1512 
1513 static void
1514 generate_code(struct brw_codegen *p,
1515               const struct brw_compiler *compiler,
1516               void *log_data,
1517               const nir_shader *nir,
1518               struct brw_vue_prog_data *prog_data,
1519               const struct cfg_t *cfg,
1520               const performance &perf,
1521               struct brw_compile_stats *stats,
1522               bool debug_enabled)
1523 {
1524    const struct intel_device_info *devinfo = p->devinfo;
1525    const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->info.stage);
1526    struct disasm_info *disasm_info = disasm_initialize(p->isa, cfg);
1527 
1528    /* `send_count` explicitly does not include spills or fills, as we'd
1529     * like to use it as a metric for intentional memory access or other
1530     * shared function use.  Otherwise, subtle changes to scheduling or
1531     * register allocation could cause it to fluctuate wildly - and that
1532     * effect is already counted in spill/fill counts.
1533     */
1534    int spill_count = 0, fill_count = 0;
1535    int loop_count = 0, send_count = 0;
1536 
1537    foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
1538       struct brw_reg src[3], dst;
1539 
1540       if (unlikely(debug_enabled))
1541          disasm_annotate(disasm_info, inst, p->next_insn_offset);
1542 
1543       for (unsigned int i = 0; i < 3; i++) {
1544          src[i] = inst->src[i].as_brw_reg();
1545       }
1546       dst = inst->dst.as_brw_reg();
1547 
1548       brw_set_default_predicate_control(p, inst->predicate);
1549       brw_set_default_predicate_inverse(p, inst->predicate_inverse);
1550       brw_set_default_flag_reg(p, inst->flag_subreg / 2, inst->flag_subreg % 2);
1551       brw_set_default_saturate(p, inst->saturate);
1552       brw_set_default_mask_control(p, inst->force_writemask_all);
1553       brw_set_default_acc_write_control(p, inst->writes_accumulator);
1554 
1555       assert(inst->group % inst->exec_size == 0);
1556       assert(inst->group % 4 == 0);
1557 
1558       /* There are some instructions where the destination is 64-bit
1559        * but we retype it to a smaller type. In that case, we cannot
1560        * double the exec_size.
1561        */
1562       const bool is_df = (get_exec_type_size(inst) == 8 ||
1563                           inst->dst.type == BRW_REGISTER_TYPE_DF) &&
1564                          inst->opcode != VEC4_OPCODE_PICK_LOW_32BIT &&
1565                          inst->opcode != VEC4_OPCODE_PICK_HIGH_32BIT &&
1566                          inst->opcode != VEC4_OPCODE_SET_LOW_32BIT &&
1567                          inst->opcode != VEC4_OPCODE_SET_HIGH_32BIT;
1568 
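      /* On IVB/BYT each DF channel executes as two 32-bit channels, so the
       * nominal execution size is doubled for 64-bit instructions there.
       */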
1569       unsigned exec_size = inst->exec_size;
1570       if (devinfo->verx10 == 70 && is_df)
1571          exec_size *= 2;
1572 
1573       brw_set_default_exec_size(p, cvt(exec_size) - 1);
1574 
1575       if (!inst->force_writemask_all)
1576          brw_set_default_group(p, inst->group);
1577 
1578       assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->ver));
1579       assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
1580 
1581       unsigned pre_emit_nr_insn = p->nr_insn;
1582 
1583       switch (inst->opcode) {
1584       case VEC4_OPCODE_UNPACK_UNIFORM:
1585       case BRW_OPCODE_MOV:
1586       case VEC4_OPCODE_MOV_FOR_SCRATCH:
1587          brw_MOV(p, dst, src[0]);
1588          break;
1589       case BRW_OPCODE_ADD:
1590          brw_ADD(p, dst, src[0], src[1]);
1591          break;
1592       case BRW_OPCODE_MUL:
1593          brw_MUL(p, dst, src[0], src[1]);
1594          break;
1595       case BRW_OPCODE_MACH:
1596          brw_MACH(p, dst, src[0], src[1]);
1597          break;
1598 
1599       case BRW_OPCODE_MAD:
1600          assert(devinfo->ver >= 6);
1601          brw_MAD(p, dst, src[0], src[1], src[2]);
1602          break;
1603 
1604       case BRW_OPCODE_FRC:
1605          brw_FRC(p, dst, src[0]);
1606          break;
1607       case BRW_OPCODE_RNDD:
1608          brw_RNDD(p, dst, src[0]);
1609          break;
1610       case BRW_OPCODE_RNDE:
1611          brw_RNDE(p, dst, src[0]);
1612          break;
1613       case BRW_OPCODE_RNDZ:
1614          brw_RNDZ(p, dst, src[0]);
1615          break;
1616 
1617       case BRW_OPCODE_AND:
1618          brw_AND(p, dst, src[0], src[1]);
1619          break;
1620       case BRW_OPCODE_OR:
1621          brw_OR(p, dst, src[0], src[1]);
1622          break;
1623       case BRW_OPCODE_XOR:
1624          brw_XOR(p, dst, src[0], src[1]);
1625          break;
1626       case BRW_OPCODE_NOT:
1627          brw_NOT(p, dst, src[0]);
1628          break;
1629       case BRW_OPCODE_ASR:
1630          brw_ASR(p, dst, src[0], src[1]);
1631          break;
1632       case BRW_OPCODE_SHR:
1633          brw_SHR(p, dst, src[0], src[1]);
1634          break;
1635       case BRW_OPCODE_SHL:
1636          brw_SHL(p, dst, src[0], src[1]);
1637          break;
1638 
1639       case BRW_OPCODE_CMP:
1640          brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
1641          break;
1642       case BRW_OPCODE_CMPN:
1643          brw_CMPN(p, dst, inst->conditional_mod, src[0], src[1]);
1644          break;
1645       case BRW_OPCODE_SEL:
1646          brw_SEL(p, dst, src[0], src[1]);
1647          break;
1648 
1649       case BRW_OPCODE_DPH:
1650          brw_DPH(p, dst, src[0], src[1]);
1651          break;
1652 
1653       case BRW_OPCODE_DP4:
1654          brw_DP4(p, dst, src[0], src[1]);
1655          break;
1656 
1657       case BRW_OPCODE_DP3:
1658          brw_DP3(p, dst, src[0], src[1]);
1659          break;
1660 
1661       case BRW_OPCODE_DP2:
1662          brw_DP2(p, dst, src[0], src[1]);
1663          break;
1664 
1665       case BRW_OPCODE_F32TO16:
1666          assert(devinfo->ver >= 7);
1667          brw_F32TO16(p, dst, src[0]);
1668          break;
1669 
1670       case BRW_OPCODE_F16TO32:
1671          assert(devinfo->ver >= 7);
1672          brw_F16TO32(p, dst, src[0]);
1673          break;
1674 
1675       case BRW_OPCODE_LRP:
1676          assert(devinfo->ver >= 6);
1677          brw_LRP(p, dst, src[0], src[1], src[2]);
1678          break;
1679 
1680       case BRW_OPCODE_BFREV:
1681          assert(devinfo->ver >= 7);
1682          brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
1683                    retype(src[0], BRW_REGISTER_TYPE_UD));
1684          break;
1685       case BRW_OPCODE_FBH:
1686          assert(devinfo->ver >= 7);
1687          brw_FBH(p, retype(dst, src[0].type), src[0]);
1688          break;
1689       case BRW_OPCODE_FBL:
1690          assert(devinfo->ver >= 7);
1691          brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD),
1692                  retype(src[0], BRW_REGISTER_TYPE_UD));
1693          break;
1694       case BRW_OPCODE_LZD:
1695          brw_LZD(p, dst, src[0]);
1696          break;
1697       case BRW_OPCODE_CBIT:
1698          assert(devinfo->ver >= 7);
1699          brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD),
1700                   retype(src[0], BRW_REGISTER_TYPE_UD));
1701          break;
1702       case BRW_OPCODE_ADDC:
1703          assert(devinfo->ver >= 7);
1704          brw_ADDC(p, dst, src[0], src[1]);
1705          break;
1706       case BRW_OPCODE_SUBB:
1707          assert(devinfo->ver >= 7);
1708          brw_SUBB(p, dst, src[0], src[1]);
1709          break;
1710       case BRW_OPCODE_MAC:
1711          brw_MAC(p, dst, src[0], src[1]);
1712          break;
1713 
1714       case BRW_OPCODE_BFE:
1715          assert(devinfo->ver >= 7);
1716          brw_BFE(p, dst, src[0], src[1], src[2]);
1717          break;
1718 
1719       case BRW_OPCODE_BFI1:
1720          assert(devinfo->ver >= 7);
1721          brw_BFI1(p, dst, src[0], src[1]);
1722          break;
1723       case BRW_OPCODE_BFI2:
1724          assert(devinfo->ver >= 7);
1725          brw_BFI2(p, dst, src[0], src[1], src[2]);
1726          break;
1727 
1728       case BRW_OPCODE_IF:
1729          if (!inst->src[0].is_null()) {
1730             /* The instruction has an embedded compare (only allowed on gfx6) */
1731             assert(devinfo->ver == 6);
1732             gfx6_IF(p, inst->conditional_mod, src[0], src[1]);
1733          } else {
1734             brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
1735             brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
1736          }
1737          break;
1738 
1739       case BRW_OPCODE_ELSE:
1740          brw_ELSE(p);
1741          break;
1742       case BRW_OPCODE_ENDIF:
1743          brw_ENDIF(p);
1744          break;
1745 
1746       case BRW_OPCODE_DO:
1747          brw_DO(p, BRW_EXECUTE_8);
1748          break;
1749 
1750       case BRW_OPCODE_BREAK:
1751          brw_BREAK(p);
1752          brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1753          break;
1754       case BRW_OPCODE_CONTINUE:
1755          brw_CONT(p);
1756          brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1757          break;
1758 
1759       case BRW_OPCODE_WHILE:
1760          brw_WHILE(p);
1761          loop_count++;
1762          break;
1763 
1764       case SHADER_OPCODE_RCP:
1765       case SHADER_OPCODE_RSQ:
1766       case SHADER_OPCODE_SQRT:
1767       case SHADER_OPCODE_EXP2:
1768       case SHADER_OPCODE_LOG2:
1769       case SHADER_OPCODE_SIN:
1770       case SHADER_OPCODE_COS:
1771          assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
1772          if (devinfo->ver >= 7) {
1773             gfx6_math(p, dst, brw_math_function(inst->opcode), src[0],
1774                       brw_null_reg());
1775          } else if (devinfo->ver == 6) {
1776             generate_math_gfx6(p, inst, dst, src[0], brw_null_reg());
1777          } else {
1778             generate_math1_gfx4(p, inst, dst, src[0]);
1779             send_count++;
1780          }
1781          break;
1782 
1783       case SHADER_OPCODE_POW:
1784       case SHADER_OPCODE_INT_QUOTIENT:
1785       case SHADER_OPCODE_INT_REMAINDER:
1786          assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
1787          if (devinfo->ver >= 7) {
1788             gfx6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
1789          } else if (devinfo->ver == 6) {
1790             generate_math_gfx6(p, inst, dst, src[0], src[1]);
1791          } else {
1792             generate_math2_gfx4(p, inst, dst, src[0], src[1]);
1793             send_count++;
1794          }
1795          break;
1796 
1797       case SHADER_OPCODE_TEX:
1798       case SHADER_OPCODE_TXD:
1799       case SHADER_OPCODE_TXF:
1800       case SHADER_OPCODE_TXF_CMS:
1801       case SHADER_OPCODE_TXF_CMS_W:
1802       case SHADER_OPCODE_TXF_MCS:
1803       case SHADER_OPCODE_TXL:
1804       case SHADER_OPCODE_TXS:
1805       case SHADER_OPCODE_TG4:
1806       case SHADER_OPCODE_TG4_OFFSET:
1807       case SHADER_OPCODE_SAMPLEINFO:
1808          generate_tex(p, prog_data, nir->info.stage,
1809                       inst, dst, src[0], src[1], src[2]);
1810          send_count++;
1811          break;
1812 
1813       case SHADER_OPCODE_GET_BUFFER_SIZE:
1814          generate_get_buffer_size(p, inst, dst, src[0], src[1]);
1815          send_count++;
1816          break;
1817 
1818       case VEC4_VS_OPCODE_URB_WRITE:
1819          generate_vs_urb_write(p, inst);
1820          send_count++;
1821          break;
1822 
1823       case SHADER_OPCODE_GFX4_SCRATCH_READ:
1824          generate_scratch_read(p, inst, dst, src[0]);
1825          fill_count++;
1826          break;
1827 
1828       case SHADER_OPCODE_GFX4_SCRATCH_WRITE:
1829          generate_scratch_write(p, inst, dst, src[0], src[1]);
1830          spill_count++;
1831          break;
1832 
1833       case VS_OPCODE_PULL_CONSTANT_LOAD:
1834          generate_pull_constant_load(p, inst, dst, src[0], src[1]);
1835          send_count++;
1836          break;
1837 
1838       case VS_OPCODE_PULL_CONSTANT_LOAD_GFX7:
1839          generate_pull_constant_load_gfx7(p, inst, dst, src[0], src[1]);
1840          send_count++;
1841          break;
1842 
1843       case VEC4_GS_OPCODE_URB_WRITE:
1844          generate_gs_urb_write(p, inst);
1845          send_count++;
1846          break;
1847 
1848       case VEC4_GS_OPCODE_URB_WRITE_ALLOCATE:
1849          generate_gs_urb_write_allocate(p, inst);
1850          send_count++;
1851          break;
1852 
1853       case GS_OPCODE_SVB_WRITE:
1854          generate_gs_svb_write(p, inst, dst, src[0], src[1]);
1855          send_count++;
1856          break;
1857 
1858       case GS_OPCODE_SVB_SET_DST_INDEX:
1859          generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
1860          break;
1861 
1862       case GS_OPCODE_THREAD_END:
1863          generate_gs_thread_end(p, inst);
1864          send_count++;
1865          break;
1866 
1867       case GS_OPCODE_SET_WRITE_OFFSET:
1868          generate_gs_set_write_offset(p, dst, src[0], src[1]);
1869          break;
1870 
1871       case GS_OPCODE_SET_VERTEX_COUNT:
1872          generate_gs_set_vertex_count(p, dst, src[0]);
1873          break;
1874 
1875       case GS_OPCODE_FF_SYNC:
1876          generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
1877          send_count++;
1878          break;
1879 
1880       case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
1881          generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
1882          break;
1883 
1884       case GS_OPCODE_SET_PRIMITIVE_ID:
1885          generate_gs_set_primitive_id(p, dst);
1886          break;
1887 
1888       case GS_OPCODE_SET_DWORD_2:
1889          generate_gs_set_dword_2(p, dst, src[0]);
1890          break;
1891 
1892       case GS_OPCODE_PREPARE_CHANNEL_MASKS:
1893          generate_gs_prepare_channel_masks(p, dst);
1894          break;
1895 
1896       case GS_OPCODE_SET_CHANNEL_MASKS:
1897          generate_gs_set_channel_masks(p, dst, src[0]);
1898          break;
1899 
1900       case GS_OPCODE_GET_INSTANCE_ID:
1901          generate_gs_get_instance_id(p, dst);
1902          break;
1903 
1904       case VEC4_OPCODE_UNTYPED_ATOMIC:
1905          assert(src[2].file == BRW_IMMEDIATE_VALUE);
1906          brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
1907                             !inst->dst.is_null(), inst->header_size);
1908          send_count++;
1909          break;
1910 
1911       case VEC4_OPCODE_UNTYPED_SURFACE_READ:
1912          assert(!inst->header_size);
1913          assert(src[2].file == BRW_IMMEDIATE_VALUE);
1914          brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
1915                                   src[2].ud);
1916          send_count++;
1917          break;
1918 
1919       case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
1920          assert(src[2].file == BRW_IMMEDIATE_VALUE);
1921          brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
1922                                    src[2].ud, inst->header_size);
1923          send_count++;
1924          break;
1925 
1926       case SHADER_OPCODE_MEMORY_FENCE:
1927          brw_memory_fence(p, dst, src[0], BRW_OPCODE_SEND,
1928                           brw_message_target(inst->sfid),
1929                           inst->desc,
1930                           /* commit_enable */ false,
1931                           /* bti */ 0);
1932          send_count++;
1933          break;
1934 
1935       case SHADER_OPCODE_FIND_LIVE_CHANNEL:
1936          brw_find_live_channel(p, dst, false);
1937          break;
1938 
1939       case SHADER_OPCODE_BROADCAST:
1940          assert(inst->force_writemask_all);
1941          brw_broadcast(p, dst, src[0], src[1]);
1942          break;
1943 
1944       case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
1945          generate_unpack_flags(p, dst);
1946          break;
1947 
1948       case VEC4_OPCODE_MOV_BYTES: {
1949          /* Moves the low byte from each channel, using an Align1 access mode
1950           * and a <4,1,0> source region.
1951           */
1952          assert(src[0].type == BRW_REGISTER_TYPE_UB ||
1953                 src[0].type == BRW_REGISTER_TYPE_B);
1954 
1955          brw_set_default_access_mode(p, BRW_ALIGN_1);
1956          src[0].vstride = BRW_VERTICAL_STRIDE_4;
1957          src[0].width = BRW_WIDTH_1;
1958          src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
1959          brw_MOV(p, dst, src[0]);
1960          brw_set_default_access_mode(p, BRW_ALIGN_16);
1961          break;
1962       }
1963 
1964       case VEC4_OPCODE_DOUBLE_TO_F32:
1965       case VEC4_OPCODE_DOUBLE_TO_D32:
1966       case VEC4_OPCODE_DOUBLE_TO_U32: {
1967          assert(type_sz(src[0].type) == 8);
1968          assert(type_sz(dst.type) == 8);
1969 
1970          brw_reg_type dst_type;
1971 
1972          switch (inst->opcode) {
1973          case VEC4_OPCODE_DOUBLE_TO_F32:
1974             dst_type = BRW_REGISTER_TYPE_F;
1975             break;
1976          case VEC4_OPCODE_DOUBLE_TO_D32:
1977             dst_type = BRW_REGISTER_TYPE_D;
1978             break;
1979          case VEC4_OPCODE_DOUBLE_TO_U32:
1980             dst_type = BRW_REGISTER_TYPE_UD;
1981             break;
1982          default:
1983             unreachable("Not supported conversion");
1984          }
1985          dst = retype(dst, dst_type);
1986 
1987          brw_set_default_access_mode(p, BRW_ALIGN_1);
1988 
1989          /* When converting from DF->F we normally set the destination's
1990           * stride to 2 to satisfy alignment requirements.  But on IVB/BYT each
1991           * DF channel implicitly writes two floats, the first one being the
1992           * converted value, so a stride of 1 is sufficient there.
1993           */
1994          struct brw_reg spread_dst;
1995          if (devinfo->verx10 == 70)
1996             spread_dst = stride(dst, 8, 4, 1);
1997          else
1998             spread_dst = stride(dst, 8, 4, 2);
1999 
2000          brw_MOV(p, spread_dst, src[0]);
2001 
2002          brw_set_default_access_mode(p, BRW_ALIGN_16);
2003          break;
2004       }
2005 
2006       case VEC4_OPCODE_TO_DOUBLE: {
2007          assert(type_sz(src[0].type) == 4);
2008          assert(type_sz(dst.type) == 8);
2009 
2010          brw_set_default_access_mode(p, BRW_ALIGN_1);
2011 
2012          brw_MOV(p, dst, src[0]);
2013 
2014          brw_set_default_access_mode(p, BRW_ALIGN_16);
2015          break;
2016       }
2017 
2018       case VEC4_OPCODE_PICK_LOW_32BIT:
2019       case VEC4_OPCODE_PICK_HIGH_32BIT: {
2020          /* Stores the low/high 32-bit of each 64-bit element in src[0] into
2021           * dst using ALIGN1 mode and a <8,4,2>:UD region on the source.
2022           */
2023          assert(type_sz(src[0].type) == 8);
2024          assert(type_sz(dst.type) == 4);
2025 
2026          brw_set_default_access_mode(p, BRW_ALIGN_1);
2027 
2028          dst = retype(dst, BRW_REGISTER_TYPE_UD);
2029          dst.hstride = BRW_HORIZONTAL_STRIDE_1;
2030 
2031          src[0] = retype(src[0], BRW_REGISTER_TYPE_UD);
2032          if (inst->opcode == VEC4_OPCODE_PICK_HIGH_32BIT)
2033             src[0] = suboffset(src[0], 1);
2034          src[0] = spread(src[0], 2);
2035          brw_MOV(p, dst, src[0]);
2036 
2037          brw_set_default_access_mode(p, BRW_ALIGN_16);
2038          break;
2039       }
2040 
2041       case VEC4_OPCODE_SET_LOW_32BIT:
2042       case VEC4_OPCODE_SET_HIGH_32BIT: {
2043          /* Reads consecutive 32-bit elements from src[0] and writes
2044           * them to the low/high 32-bit of each 64-bit element in dst.
2045           */
2046          assert(type_sz(src[0].type) == 4);
2047          assert(type_sz(dst.type) == 8);
2048 
2049          brw_set_default_access_mode(p, BRW_ALIGN_1);
2050 
2051          dst = retype(dst, BRW_REGISTER_TYPE_UD);
2052          if (inst->opcode == VEC4_OPCODE_SET_HIGH_32BIT)
2053             dst = suboffset(dst, 1);
2054          dst.hstride = BRW_HORIZONTAL_STRIDE_2;
2055 
2056          src[0] = retype(src[0], BRW_REGISTER_TYPE_UD);
2057          brw_MOV(p, dst, src[0]);
2058 
2059          brw_set_default_access_mode(p, BRW_ALIGN_16);
2060          break;
2061       }
2062 
2063       case VEC4_OPCODE_PACK_BYTES: {
2064          /* Is effectively:
2065           *
2066           *   mov(8) dst<16,4,1>:UB src<4,1,0>:UB
2067           *
2068           * but the only regioning a destination supports is horizontal stride,
2069           * so instead we have to use two instructions:
2070           *
2071           *   mov(4) dst<1>:UB     src<4,1,0>:UB
2072           *   mov(4) dst.16<1>:UB  src.16<4,1,0>:UB
2073           *
2074           * which pack the four bytes from the low and high four DWords.
2075           */
2076          assert(util_is_power_of_two_nonzero(dst.writemask));
2077          unsigned offset = __builtin_ctz(dst.writemask);
2078 
2079          dst.type = BRW_REGISTER_TYPE_UB;
2080 
2081          brw_set_default_access_mode(p, BRW_ALIGN_1);
2082 
2083          src[0].type = BRW_REGISTER_TYPE_UB;
2084          src[0].vstride = BRW_VERTICAL_STRIDE_4;
2085          src[0].width = BRW_WIDTH_1;
2086          src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
2087          dst.subnr = offset * 4;
2088          struct brw_inst *insn = brw_MOV(p, dst, src[0]);
2089          brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
2090          brw_inst_set_no_dd_clear(p->devinfo, insn, true);
2091          brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);
2092 
2093          src[0].subnr = 16;
2094          dst.subnr = 16 + offset * 4;
2095          insn = brw_MOV(p, dst, src[0]);
2096          brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
2097          brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
2098          brw_inst_set_no_dd_check(p->devinfo, insn, true);
2099 
2100          brw_set_default_access_mode(p, BRW_ALIGN_16);
2101          break;
2102       }
2103 
2104       case VEC4_OPCODE_ZERO_OOB_PUSH_REGS:
2105          generate_zero_oob_push_regs(p, &prog_data->base, dst, src[0]);
2106          break;
2107 
2108       case VEC4_TCS_OPCODE_URB_WRITE:
2109          generate_tcs_urb_write(p, inst, src[0]);
2110          send_count++;
2111          break;
2112 
2113       case VEC4_OPCODE_URB_READ:
2114          generate_vec4_urb_read(p, inst, dst, src[0]);
2115          send_count++;
2116          break;
2117 
2118       case VEC4_TCS_OPCODE_SET_INPUT_URB_OFFSETS:
2119          generate_tcs_input_urb_offsets(p, dst, src[0], src[1]);
2120          break;
2121 
2122       case VEC4_TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
2123          generate_tcs_output_urb_offsets(p, dst, src[0], src[1]);
2124          break;
2125 
2126       case TCS_OPCODE_GET_INSTANCE_ID:
2127          generate_tcs_get_instance_id(p, dst);
2128          break;
2129 
2130       case TCS_OPCODE_GET_PRIMITIVE_ID:
2131          generate_tcs_get_primitive_id(p, dst);
2132          break;
2133 
2134       case TCS_OPCODE_CREATE_BARRIER_HEADER:
2135          generate_tcs_create_barrier_header(p, prog_data, dst);
2136          break;
2137 
2138       case TES_OPCODE_CREATE_INPUT_READ_HEADER:
2139          generate_tes_create_input_read_header(p, dst);
2140          break;
2141 
2142       case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
2143          generate_tes_add_indirect_urb_offset(p, dst, src[0], src[1]);
2144          break;
2145 
2146       case TES_OPCODE_GET_PRIMITIVE_ID:
2147          generate_tes_get_primitive_id(p, dst);
2148          break;
2149 
2150       case TCS_OPCODE_SRC0_010_IS_ZERO:
2151          /* If src_reg had stride like fs_reg, we wouldn't need this. */
2152          brw_MOV(p, brw_null_reg(), stride(src[0], 0, 1, 0));
2153          break;
2154 
2155       case TCS_OPCODE_RELEASE_INPUT:
2156          generate_tcs_release_input(p, dst, src[0], src[1]);
2157          send_count++;
2158          break;
2159 
2160       case TCS_OPCODE_THREAD_END:
2161          generate_tcs_thread_end(p, inst);
2162          send_count++;
2163          break;
2164 
2165       case SHADER_OPCODE_BARRIER:
2166          brw_barrier(p, src[0]);
2167          brw_WAIT(p);
2168          send_count++;
2169          break;
2170 
2171       case SHADER_OPCODE_MOV_INDIRECT:
2172          generate_mov_indirect(p, inst, dst, src[0], src[1]);
2173          break;
2174 
2175       case BRW_OPCODE_DIM:
2176          assert(devinfo->verx10 == 75);
2177          assert(src[0].type == BRW_REGISTER_TYPE_DF);
2178          assert(dst.type == BRW_REGISTER_TYPE_DF);
2179          brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
2180          break;
2181 
2182       default:
2183          unreachable("Unsupported opcode");
2184       }
2185 
2186       if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
2187          /* Handled dependency hints in the generator. */
2188 
2189          assert(!inst->conditional_mod);
2190       } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
2191          assert(p->nr_insn == pre_emit_nr_insn + 1 ||
2192                 !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
2193                  "emitting more than 1 instruction");
2194 
2195          brw_inst *last = &p->store[pre_emit_nr_insn];
2196 
2197          if (inst->conditional_mod)
2198             brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
2199          brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
2200          brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
2201       }
2202    }
2203 
2204    brw_set_uip_jip(p, 0);
2205 
2206    /* end of program sentinel */
2207    disasm_new_inst_group(disasm_info, p->next_insn_offset);
2208 
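   /* Debug builds always run the instruction validator (and assert on the
    * result below); release builds only validate when shader debugging is
    * enabled.
    */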
2209 #ifndef NDEBUG
2210    bool validated =
2211 #else
2212    if (unlikely(debug_enabled))
2213 #endif
2214       brw_validate_instructions(&compiler->isa, p->store,
2215                                 0, p->next_insn_offset,
2216                                 disasm_info);
2217 
2218    int before_size = p->next_insn_offset;
2219    brw_compact_instructions(p, 0, disasm_info);
2220    int after_size = p->next_insn_offset;
2221 
2222    if (unlikely(debug_enabled)) {
2223       unsigned char sha1[21];
2224       char sha1buf[41];
2225 
2226       _mesa_sha1_compute(p->store, p->next_insn_offset, sha1);
2227       _mesa_sha1_format(sha1buf, sha1);
2228 
2229       fprintf(stderr, "Native code for %s %s shader %s (sha1 %s):\n",
2230             nir->info.label ? nir->info.label : "unnamed",
2231             _mesa_shader_stage_to_string(nir->info.stage), nir->info.name,
2232             sha1buf);
2233 
2234       fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
2235                      "spills:fills, %u sends. Compacted %d to %d bytes (%.0f%%)\n",
2236             stage_abbrev, before_size / 16, loop_count, perf.latency,
2237             spill_count, fill_count, send_count, before_size, after_size,
2238             100.0f * (before_size - after_size) / before_size);
2239 
2240       /* overriding the shader makes disasm_info invalid */
2241       if (!brw_try_override_assembly(p, 0, sha1buf)) {
2242          dump_assembly(p->store, 0, p->next_insn_offset,
2243                        disasm_info, perf.block_latency);
2244       } else {
2245          fprintf(stderr, "Successfully overrode shader with sha1 %s\n\n", sha1buf);
2246       }
2247    }
2248    ralloc_free(disasm_info);
2249    assert(validated);
2250 
2251    brw_shader_debug_log(compiler, log_data,
2252                         "%s vec4 shader: %d inst, %d loops, %u cycles, "
2253                         "%d:%d spills:fills, %u sends, "
2254                         "compacted %d to %d bytes.\n",
2255                         stage_abbrev, before_size / 16,
2256                         loop_count, perf.latency, spill_count,
2257                         fill_count, send_count, before_size, after_size);
2258    if (stats) {
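      /* A vec4 program has no scalar SIMD dispatch width, so 0 is reported
       * here.
       */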
2259       stats->dispatch_width = 0;
2260       stats->instructions = before_size / 16;
2261       stats->sends = send_count;
2262       stats->loops = loop_count;
2263       stats->cycles = perf.latency;
2264       stats->spills = spill_count;
2265       stats->fills = fill_count;
2266    }
2267 }
2268 
2269 extern "C" const unsigned *
2270 brw_vec4_generate_assembly(const struct brw_compiler *compiler,
2271                            void *log_data,
2272                            void *mem_ctx,
2273                            const nir_shader *nir,
2274                            struct brw_vue_prog_data *prog_data,
2275                            const struct cfg_t *cfg,
2276                            const performance &perf,
2277                            struct brw_compile_stats *stats,
2278                            bool debug_enabled)
2279 {
2280    struct brw_codegen *p = rzalloc(mem_ctx, struct brw_codegen);
2281    brw_init_codegen(&compiler->isa, p, mem_ctx);
2282    brw_set_default_access_mode(p, BRW_ALIGN_16);
2283 
2284    generate_code(p, compiler, log_data, nir, prog_data, cfg, perf, stats,
2285                  debug_enabled);
2286 
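   /* If the shader carries constant data (typically large constant arrays),
    * append it to the program binary and record where it starts so the
    * driver can locate it.
    */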
2287    assert(prog_data->base.const_data_size == 0);
2288    if (nir->constant_data_size > 0) {
2289       prog_data->base.const_data_size = nir->constant_data_size;
2290       prog_data->base.const_data_offset =
2291          brw_append_data(p, nir->constant_data, nir->constant_data_size, 32);
2292    }
2293 
2294    return brw_get_program(p, &prog_data->base.program_size);
2295 }
2296