/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file elk_vec4_gs_visitor.cpp
 *
 * Geometry-shader-specific code derived from the vec4_visitor class.
 */

#include "elk_vec4_gs_visitor.h"
#include "elk_gfx6_gs_visitor.h"
#include "elk_eu.h"
#include "elk_cfg.h"
#include "elk_fs.h"
#include "elk_nir.h"
#include "elk_prim.h"
#include "elk_private.h"
#include "dev/intel_debug.h"

namespace elk {

vec4_gs_visitor::vec4_gs_visitor(const struct elk_compiler *compiler,
                                 const struct elk_compile_params *params,
                                 struct elk_gs_compile *c,
                                 struct elk_gs_prog_data *prog_data,
                                 const nir_shader *shader,
                                 bool no_spills,
                                 bool debug_enabled)
   : vec4_visitor(compiler, params, &c->key.base.tex,
                  &prog_data->base, shader,
                  no_spills, debug_enabled),
     c(c),
     gs_prog_data(prog_data)
{
}


static inline struct elk_reg
attribute_to_hw_reg(int attr, elk_reg_type type, bool interleaved)
{
   struct elk_reg reg;

   unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(type));
   if (interleaved) {
      reg = stride(elk_vecn_grf(width, attr / 2, (attr % 2) * 4), 0, width, 1);
   } else {
      reg = elk_vecn_grf(width, attr, 0);
   }

   reg.type = type;
   return reg;
}
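
/* A worked example of the mapping above (illustrative numbers only,
 * assuming REG_SIZE == 32 bytes, i.e. a half-register holds one vec4 of
 * 32-bit channels):
 *
 *    width = 32 / 2 / MAX2(4, 4) = 4 channels for 4-byte types (F, D, UD)
 *    width = 32 / 2 / MAX2(4, 8) = 2 channels for 8-byte types (DF, Q)
 *
 * With interleaved == true, attribute 5 lands in the second half of GRF 2
 * (attr / 2 == 2, with an (attr % 2) * 4 == 4-dword subregister offset);
 * with interleaved == false it simply occupies all of GRF 5.
 */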

/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode). If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
int
vec4_gs_visitor::setup_varying_inputs(int payload_reg,
                                      int attributes_per_reg)
{
   /* For geometry shaders there are N copies of the input attributes, where
    * N is the number of input vertices; input slot ELK_VARYING_SLOT_COUNT *
    * i + j holds attribute j for vertex i.
    *
    * Note that GS inputs are read from the VUE 256 bits (2 vec4's) at a time,
    * so the total number of input slots that will be delivered to the GS (and
    * thus the stride of the input arrays) is urb_read_length * 2.
    */
   const unsigned num_input_vertices = nir->info.gs.vertices_in;
   assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
   unsigned input_array_stride = prog_data->urb_read_length * 2;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         assert(inst->src[i].offset % REG_SIZE == 0);
         int grf = payload_reg * attributes_per_reg +
                   inst->src[i].nr + inst->src[i].offset / REG_SIZE;

         struct elk_reg reg =
            attribute_to_hw_reg(grf, inst->src[i].type, attributes_per_reg > 1);
         reg.swizzle = inst->src[i].swizzle;
         if (inst->src[i].abs)
            reg = elk_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i] = reg;
      }
   }

   int regs_used = ALIGN(input_array_stride * num_input_vertices,
                         attributes_per_reg) / attributes_per_reg;
   return payload_reg + regs_used;
}
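
/* A quick sanity check of the register accounting above, with hypothetical
 * numbers: urb_read_length == 2 gives an input array stride of 4 slots, so
 * a triangle GS (3 input vertices) consumes 12 attribute slots. In dual
 * object mode (attributes_per_reg == 1) that is 12 GRFs; in the interleaved
 * modes (attributes_per_reg == 2) it packs into ALIGN(12, 2) / 2 == 6 GRFs.
 */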

void
vec4_gs_visitor::setup_payload()
{
   /* If we are in dual instanced or single mode, then attributes are going
    * to be interleaved, so one register contains two attribute slots.
    */
   int attributes_per_reg =
      prog_data->dispatch_mode == INTEL_DISPATCH_MODE_4X2_DUAL_OBJECT ? 1 : 2;

   int reg = 0;

   /* The payload always contains important data in r0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.
    */
   reg++;

   /* If the shader uses gl_PrimitiveIDIn, that goes in r1. */
   if (gs_prog_data->include_primitive_id)
      reg++;

   reg = setup_uniforms(reg);

   reg = setup_varying_inputs(reg, attributes_per_reg);

   this->first_non_payload_grf = reg;
}
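
/* For reference, a sketch of the resulting payload layout in dual object
 * mode (register indices are illustrative; the exact numbers depend on how
 * many push constant registers setup_uniforms() consumes):
 *
 *    r0        thread header (including the URB handles)
 *    r1        gl_PrimitiveIDIn, only if include_primitive_id is set
 *    r2..rN    push constants (uniforms)
 *    rN+1...   input attributes, one slot per register here, or two slots
 *              per register in the interleaved (single/dual instance) modes
 */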


void
vec4_gs_visitor::emit_prolog()
{
   /* In vertex shaders, r0.2 is guaranteed to be initialized to zero. In
    * geometry shaders, it isn't (it contains a bunch of information we don't
    * need, like the input primitive type). We need r0.2 to be zero in order
    * to build scratch read/write messages correctly (otherwise this value
    * will be interpreted as a global offset, causing us to do our scratch
    * reads/writes to garbage memory). So just set it to zero at the top of
    * the shader.
    */
   this->current_annotation = "clear r0.2";
   dst_reg r0(retype(elk_vec4_grf(0, 0), ELK_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(ELK_GS_OPCODE_SET_DWORD_2, r0, elk_imm_ud(0u));
   inst->force_writemask_all = true;

   /* Create a virtual register to hold the vertex count */
   this->vertex_count = src_reg(this, glsl_uint_type());

   /* Initialize the vertex_count register to 0 */
   this->current_annotation = "initialize vertex_count";
   inst = emit(MOV(dst_reg(this->vertex_count), elk_imm_ud(0u)));
   inst->force_writemask_all = true;

   if (c->control_data_header_size_bits > 0) {
      /* Create a virtual register to hold the current set of control data
       * bits.
       */
      this->control_data_bits = src_reg(this, glsl_uint_type());

      /* If we're outputting more than 32 control data bits, then EmitVertex()
       * will set control_data_bits to 0 after emitting the first vertex.
       * Otherwise, we need to initialize it to 0 here.
       */
      if (c->control_data_header_size_bits <= 32) {
         this->current_annotation = "initialize control data bits";
         inst = emit(MOV(dst_reg(this->control_data_bits), elk_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::emit_thread_end()
{
   if (c->control_data_header_size_bits > 0) {
      /* During shader execution, we only ever call emit_control_data_bits()
       * just prior to outputting a vertex. Therefore, the control data bits
       * corresponding to the most recently output vertex still need to be
       * emitted.
       */
      current_annotation = "thread end: emit control data bits";
      emit_control_data_bits();
   }

   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;

   current_annotation = "thread end";
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(elk_vec8_grf(0, 0), ELK_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(ELK_GS_OPCODE_SET_VERTEX_COUNT, mrf_reg, this->vertex_count);
   inst = emit(ELK_GS_OPCODE_THREAD_END);
   inst->base_mrf = base_mrf;
   inst->mlen = 1;
}


void
vec4_gs_visitor::emit_urb_write_header(int mrf)
{
   /* The SEND instruction that writes the vertex data to the VUE will use
    * per_slot_offset=true, which means that DWORDs 3 and 4 of the message
    * header specify an offset (in multiples of 256 bits) into the URB entry
    * at which the write should take place.
    *
    * So we have to prepare a message header with the appropriate offset
    * values.
    */
   dst_reg mrf_reg(MRF, mrf);
   src_reg r0(retype(elk_vec8_grf(0, 0), ELK_REGISTER_TYPE_UD));
   this->current_annotation = "URB write header";
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(ELK_GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, this->vertex_count,
        elk_imm_ud(gs_prog_data->output_vertex_size_hwords));
}


vec4_instruction *
vec4_gs_visitor::emit_urb_write_opcode(bool complete)
{
   /* We don't care whether the vertex is complete, because in general
    * geometry shaders output multiple vertices, and we don't terminate the
    * thread until all vertices are complete.
    */
   (void) complete;

   vec4_instruction *inst = emit(ELK_VEC4_GS_OPCODE_URB_WRITE);
   inst->offset = gs_prog_data->control_data_header_size_hwords;

   inst->urb_write_flags = ELK_URB_WRITE_PER_SLOT_OFFSET;
   return inst;
}


/**
 * Write out a batch of 32 control data bits from the control_data_bits
 * register to the URB.
 *
 * The current value of the vertex_count register determines which DWORD in
 * the URB receives the control data bits. The control_data_bits register is
 * assumed to contain the correct data for the vertex that was most recently
 * output, and all previous vertices that share the same DWORD.
 *
 * This function takes care of ensuring that if no vertices have been output
 * yet, no control bits are emitted.
 */
void
vec4_gs_visitor::emit_control_data_bits()
{
   assert(c->control_data_bits_per_vertex != 0);

   /* Since the URB_WRITE_OWORD message operates with 128-bit (vec4 sized)
    * granularity, we need to use two tricks to ensure that the batch of 32
    * control data bits is written to the appropriate DWORD in the URB. To
    * select which vec4 we are writing to, we use the "slot {0,1} offset"
    * fields of the message header. To select which DWORD in the vec4 we are
    * writing to, we use the channel mask fields of the message header. To
    * avoid penalizing geometry shaders that emit a small number of vertices
    * with extra bookkeeping, we only do each of these tricks when
    * c->control_data_header_size_bits is large enough to make it necessary.
    *
    * Note: this means that if we're outputting just a single DWORD of control
    * data bits, we'll actually replicate it four times since we won't do any
    * channel masking. But that's not a problem since in this case the
    * hardware only pays attention to the first DWORD.
    */
   enum elk_urb_write_flags urb_write_flags = ELK_URB_WRITE_OWORD;
   if (c->control_data_header_size_bits > 32)
      urb_write_flags = urb_write_flags | ELK_URB_WRITE_USE_CHANNEL_MASKS;
   if (c->control_data_header_size_bits > 128)
      urb_write_flags = urb_write_flags | ELK_URB_WRITE_PER_SLOT_OFFSET;

   /* If we are using either channel masks or a per-slot offset, then we
    * need to figure out which DWORD we are trying to write to, using the
    * formula:
    *
    *    dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to:
    *
    *    dword_index = (vertex_count - 1) >> (5 - log2(bits_per_vertex))
    *
    * The code below computes the shift amount as
    * 6 - util_last_bit(bits_per_vertex), which is the same thing, since
    * util_last_bit(x) == log2(x) + 1 when x is a power of two.
    */
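   /* A worked example with hypothetical values: for a stream-output GS
    * (bits_per_vertex == 2) that has already emitted 17 vertices,
    * dword_index = 16 * 2 / 32 = 1, i.e. 16 >> (5 - 1) == 1. The per-slot
    * offset computed below would then be dword_index / 4 == 0 (the first
    * OWORD) and the channel mask would select DWORD dword_index % 4 == 1
    * within it.
    */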
   src_reg dword_index(this, glsl_uint_type());
   if (urb_write_flags) {
      src_reg prev_count(this, glsl_uint_type());
      emit(ADD(dst_reg(prev_count), this->vertex_count,
               elk_imm_ud(0xffffffffu)));
      unsigned log2_bits_per_vertex =
         util_last_bit(c->control_data_bits_per_vertex);
      emit(SHR(dst_reg(dword_index), prev_count,
               elk_imm_ud(6 - log2_bits_per_vertex)));
   }

   /* Start building the URB write message. The first MRF gets a copy of
    * R0.
    */
   int base_mrf = 1;
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(elk_vec8_grf(0, 0), ELK_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;

   if (urb_write_flags & ELK_URB_WRITE_PER_SLOT_OFFSET) {
      /* Set the per-slot offset to dword_index / 4, so that we'll write to
       * the appropriate OWORD within the control data header.
       */
      src_reg per_slot_offset(this, glsl_uint_type());
      emit(SHR(dst_reg(per_slot_offset), dword_index, elk_imm_ud(2u)));
      emit(ELK_GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, per_slot_offset,
           elk_imm_ud(1u));
   }

   if (urb_write_flags & ELK_URB_WRITE_USE_CHANNEL_MASKS) {
      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD. We need to do
       * this computation with force_writemask_all, otherwise garbage data
       * from invocation 0 might clobber the mask for invocation 1 when
       * ELK_GS_OPCODE_PREPARE_CHANNEL_MASKS tries to OR the two masks
       * together.
       */
      src_reg channel(this, glsl_uint_type());
      inst = emit(AND(dst_reg(channel), dword_index, elk_imm_ud(3u)));
      inst->force_writemask_all = true;
      src_reg one(this, glsl_uint_type());
      inst = emit(MOV(dst_reg(one), elk_imm_ud(1u)));
      inst->force_writemask_all = true;
      src_reg channel_mask(this, glsl_uint_type());
      inst = emit(SHL(dst_reg(channel_mask), one, channel));
      inst->force_writemask_all = true;
      emit(ELK_GS_OPCODE_PREPARE_CHANNEL_MASKS, dst_reg(channel_mask),
           channel_mask);
      emit(ELK_GS_OPCODE_SET_CHANNEL_MASKS, mrf_reg, channel_mask);
   }

   /* Store the control data bits in the message payload and send it. */
   dst_reg mrf_reg2(MRF, base_mrf + 1);
   inst = emit(MOV(mrf_reg2, this->control_data_bits));
   inst->force_writemask_all = true;
   inst = emit(ELK_VEC4_GS_OPCODE_URB_WRITE);
   inst->urb_write_flags = urb_write_flags;
   inst->base_mrf = base_mrf;
   inst->mlen = 2;
}

void
vec4_gs_visitor::set_stream_control_data_bits(unsigned stream_id)
{
   /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */

   /* Note: we are calling this *before* increasing vertex_count, so
    * this->vertex_count == vertex_count - 1 in the formula above.
    */

   /* Stream mode uses 2 bits per vertex */
   assert(c->control_data_bits_per_vertex == 2);

   /* Must be a valid stream */
   assert(stream_id < 4); /* MAX_VERTEX_STREAMS */

   /* Control data bits are initialized to 0 so we don't have to set any
    * bits when sending vertices to stream 0.
    */
   if (stream_id == 0)
      return;

   /* reg:sid = stream_id */
   src_reg sid(this, glsl_uint_type());
   emit(MOV(dst_reg(sid), elk_imm_ud(stream_id)));

   /* reg:shift_count = 2 * (vertex_count - 1) */
   src_reg shift_count(this, glsl_uint_type());
   emit(SHL(dst_reg(shift_count), this->vertex_count, elk_imm_ud(1u)));

   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
    * stream_id << ((2 * (vertex_count - 1)) % 32).
    */
   src_reg mask(this, glsl_uint_type());
   emit(SHL(dst_reg(mask), sid, shift_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}
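
/* For example (hypothetical values): routing the third vertex
 * (this->vertex_count == 2 at the time of the call) to stream 2 gives
 * shift_count == 4, so the OR above sets control_data_bits |= 2 << 4, i.e.
 * the two stream ID bits for that vertex within the current 32-bit batch.
 */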

void
vec4_gs_visitor::gs_emit_vertex(int stream_id)
{
   this->current_annotation = "emit vertex: safety check";

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or less, then we can wait
    * until the shader is over to output them all. Otherwise we need to
    * output them as we go. Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (c->control_data_header_size_bits > 32) {
      this->current_annotation = "emit vertex: emit control data bits";
      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits. This is the case when:
       *
       *     (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *     vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to:
       *
       *     vertex_count & (32 / bits_per_vertex - 1) == 0
       */
      vec4_instruction *inst =
         emit(AND(dst_null_ud(), this->vertex_count,
                  elk_imm_ud(32 / c->control_data_bits_per_vertex - 1)));
      inst->conditional_mod = ELK_CONDITIONAL_Z;

      emit(IF(ELK_PREDICATE_NORMAL));
      {
         /* If vertex_count is 0, then no control data bits have been
          * accumulated yet, so we skip emitting them.
          */
         emit(CMP(dst_null_ud(), this->vertex_count, elk_imm_ud(0u),
                  ELK_CONDITIONAL_NEQ));
         emit(IF(ELK_PREDICATE_NORMAL));
         emit_control_data_bits();
         emit(ELK_OPCODE_ENDIF);

         /* Reset control_data_bits to 0 so we can start accumulating a new
          * batch.
          *
          * Note: in the case where vertex_count == 0, this neutralizes the
          * effect of any call to EndPrimitive() that the shader may have
          * made before outputting its first vertex.
          */
         inst = emit(MOV(dst_reg(this->control_data_bits), elk_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
      emit(ELK_OPCODE_ENDIF);
   }

   this->current_annotation = "emit vertex: vertex data";
   emit_vertex();

   /* In stream mode we have to set control data bits for all vertices
    * unless we have disabled control data bits completely (which we do
    * for MESA_PRIM_POINTS outputs that don't use streams).
    */
   if (c->control_data_header_size_bits > 0 &&
       gs_prog_data->control_data_format ==
          GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
      this->current_annotation = "emit vertex: Stream control data bits";
      set_stream_control_data_bits(stream_id);
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::gs_end_primitive()
{
   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits. Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   if (c->control_data_header_size_bits == 0)
      return;

   /* Cut bits use one bit per vertex. */
   assert(c->control_data_bits_per_vertex == 1);

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise. So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
   src_reg one(this, glsl_uint_type());
   emit(MOV(dst_reg(one), elk_imm_ud(1u)));
   src_reg prev_count(this, glsl_uint_type());
   emit(ADD(dst_reg(prev_count), this->vertex_count, elk_imm_ud(0xffffffffu)));
   src_reg mask(this, glsl_uint_type());
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
   emit(SHL(dst_reg(mask), one, prev_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}
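
/* An example of the cut-bit encoding (illustrative): if EndPrimitive() is
 * called right after emitting the third vertex (vertex_count == 3),
 * prev_count == 2 and the SHL/OR above set bit 2 of control_data_bits,
 * marking a strip cut after that vertex.
 */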

static const GLuint gl_prim_to_hw_prim[MESA_PRIM_TRIANGLE_STRIP_ADJACENCY+1] = {
   [MESA_PRIM_POINTS] = _3DPRIM_POINTLIST,
   [MESA_PRIM_LINES] = _3DPRIM_LINELIST,
   [MESA_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
   [MESA_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
   [MESA_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
   [MESA_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
   [MESA_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
   [MESA_PRIM_QUADS] = _3DPRIM_QUADLIST,
   [MESA_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
   [MESA_PRIM_POLYGON] = _3DPRIM_POLYGON,
   [MESA_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
   [MESA_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
   [MESA_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
   [MESA_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};

} /* namespace elk */

584 extern "C" const unsigned *
elk_compile_gs(const struct elk_compiler * compiler,struct elk_compile_gs_params * params)585 elk_compile_gs(const struct elk_compiler *compiler,
586 struct elk_compile_gs_params *params)
587 {
588 nir_shader *nir = params->base.nir;
589 const struct elk_gs_prog_key *key = params->key;
590 struct elk_gs_prog_data *prog_data = params->prog_data;
591
592 struct elk_gs_compile c;
593 memset(&c, 0, sizeof(c));
594 c.key = *key;
595
596 const bool is_scalar = compiler->scalar_stage[MESA_SHADER_GEOMETRY];
597 const bool debug_enabled = elk_should_print_shader(nir, DEBUG_GS);
598
599 prog_data->base.base.stage = MESA_SHADER_GEOMETRY;
600 prog_data->base.base.ray_queries = nir->info.ray_queries;
601 prog_data->base.base.total_scratch = 0;

   /* The GLSL linker will have already matched up GS inputs and the outputs
    * of prior stages. The driver does extend VS outputs in some cases, but
    * only for legacy OpenGL or Gfx4-5 hardware, neither of which offers
    * geometry shader support. So we can safely ignore that.
    *
    * For SSO pipelines, we use a fixed VUE map layout based on variable
    * locations, so we can rely on rendezvous-by-location making this work.
    */
   GLbitfield64 inputs_read = nir->info.inputs_read;
   elk_compute_vue_map(compiler->devinfo,
                       &c.input_vue_map, inputs_read,
                       nir->info.separate_shader, 1);

   elk_nir_apply_key(nir, compiler, &key->base, 8);
   elk_nir_lower_vue_inputs(nir, &c.input_vue_map);
   elk_nir_lower_vue_outputs(nir);
   elk_postprocess_nir(nir, compiler, debug_enabled,
                       key->base.robust_flags);

   prog_data->base.clip_distance_mask =
      ((1 << nir->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << nir->info.cull_distance_array_size) - 1) <<
         nir->info.clip_distance_array_size;

   prog_data->include_primitive_id =
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_PRIMITIVE_ID);

   prog_data->invocations = nir->info.gs.invocations;

   if (compiler->devinfo->ver >= 8)
      nir_gs_count_vertices_and_primitives(
         nir, &prog_data->static_vertex_count, nullptr, nullptr, 1u);

   if (compiler->devinfo->ver >= 7) {
      if (nir->info.gs.output_primitive == MESA_PRIM_POINTS) {
         /* When the output type is points, the geometry shader may output
          * data to multiple streams, and EndPrimitive() has no effect. So we
          * configure the hardware to interpret the control data as stream ID.
          */
         prog_data->control_data_format = GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using non-zero streams */
         if (nir->info.gs.active_stream_mask != (1 << 0))
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip,
          * EndPrimitive() may be used to terminate the current strip and
          * start a new one (similar to primitive restart), and outputting
          * data to multiple streams is not supported. So we configure the
          * hardware to interpret the control data as EndPrimitive information
          * (a.k.a. "cut bits").
          */
         prog_data->control_data_format = GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex =
            nir->info.gs.uses_end_primitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gfx6. */
      c.control_data_bits_per_vertex = 0;
   }
   c.control_data_header_size_bits =
      nir->info.gs.vertices_out * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   prog_data->control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;
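
   /* For example (hypothetical shader): a GS with max_vertices == 96 using
    * cut bits (1 bit per vertex) needs a 96-bit header, which rounds up to
    * ALIGN(96, 256) / 256 == 1 HWORD of control data.
    */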

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only time
    *     software can program a vertex size with an odd number of 16B units
    *     is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB). So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = prog_data->base.vue_map.num_slots * 16;
   assert(compiler->devinfo->ver == 6 ||
          output_vertex_size_bytes <= GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   prog_data->output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;
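
   /* E.g. with a hypothetical VUE map of 7 slots, the vertex is 112 bytes,
    * padded to ALIGN(112, 32) / 32 == 4 HWORDs (128 bytes).
    */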

   /* Compute URB entry size. The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices. So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gfx7+ where we have a single URB entry that will hold
    * all the output. In gfx6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex. Also, gfx6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (compiler->devinfo->ver >= 7) {
      output_size_bytes =
         prog_data->output_vertex_size_hwords * 32 * nir->info.gs.vertices_out;
      output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
   } else {
      output_size_bytes = prog_data->output_vertex_size_hwords * 32;
   }

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (compiler->devinfo->ver >= 8)
      output_size_bytes += 32;

   /* Shaders can technically set max_vertices = 0, at which point we
    * may have a URB size of 0 bytes. Nothing good can come from that,
    * so enforce a minimum size.
    */
   if (output_size_bytes == 0)
      output_size_bytes = 1;

   unsigned max_output_size_bytes = GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (compiler->devinfo->ver == 6)
      max_output_size_bytes = GFX6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return NULL;

   /* URB entry sizes are stored as a multiple of 64 bytes in gfx7+ and
    * a multiple of 128 bytes in gfx6.
    */
   if (compiler->devinfo->ver >= 7) {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
   } else {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
   }
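
   /* Continuing the hypothetical example above on gfx7: 96 vertices of
    * 4 HWORDs each plus 1 HWORD of control header is 96 * 128 + 32 =
    * 12320 bytes, which is stored as ALIGN(12320, 64) / 64 == 193
    * 64-byte units.
    */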

   assert(nir->info.gs.output_primitive < ARRAY_SIZE(elk::gl_prim_to_hw_prim));
   prog_data->output_topology =
      elk::gl_prim_to_hw_prim[nir->info.gs.output_primitive];

   prog_data->vertices_in = nir->info.gs.vertices_in;

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   prog_data->base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;

   /* Now that prog_data setup is done, we are ready to actually compile the
    * program.
    */
   if (unlikely(debug_enabled)) {
      fprintf(stderr, "GS Input ");
      elk_print_vue_map(stderr, &c.input_vue_map, MESA_SHADER_GEOMETRY);
      fprintf(stderr, "GS Output ");
      elk_print_vue_map(stderr, &prog_data->base.vue_map, MESA_SHADER_GEOMETRY);
   }

   if (is_scalar) {
      elk_fs_visitor v(compiler, &params->base, &c, prog_data, nir,
                       params->base.stats != NULL, debug_enabled);
      if (v.run_gs()) {
         prog_data->base.dispatch_mode = INTEL_DISPATCH_MODE_SIMD8;

         assert(v.payload().num_regs % reg_unit(compiler->devinfo) == 0);
         prog_data->base.base.dispatch_grf_start_reg =
            v.payload().num_regs / reg_unit(compiler->devinfo);

         elk_fs_generator g(compiler, &params->base,
                            &prog_data->base.base, false, MESA_SHADER_GEOMETRY);
         if (unlikely(debug_enabled)) {
            const char *label =
               nir->info.label ? nir->info.label : "unnamed";
            char *name = ralloc_asprintf(params->base.mem_ctx,
                                         "%s geometry shader %s",
                                         label, nir->info.name);
            g.enable_debug(name);
         }
         g.generate_code(v.cfg, v.dispatch_width, v.shader_stats,
                         v.performance_analysis.require(), params->base.stats);
         g.add_const_data(nir->constant_data, nir->constant_data_size);
         return g.get_assembly();
      }

      params->base.error_str = ralloc_strdup(params->base.mem_ctx, v.fail_msg);

      return NULL;
   }

   if (compiler->devinfo->ver >= 7) {
      /* Compile the geometry shader in DUAL_OBJECT dispatch mode, if we can
       * do so without spilling. If the GS invocations count > 1, then we
       * can't use dual object mode.
       */
      if (prog_data->invocations <= 1 &&
          !INTEL_DEBUG(DEBUG_NO_DUAL_OBJECT_GS)) {
         prog_data->base.dispatch_mode = INTEL_DISPATCH_MODE_4X2_DUAL_OBJECT;

         elk::vec4_gs_visitor v(compiler, &params->base, &c, prog_data, nir,
                                true /* no_spills */,
                                debug_enabled);

         /* Back up 'nr_params' and 'param', as they can be modified by the
          * DUAL_OBJECT visitor. If it fails, we will run the fallback
          * (DUAL_INSTANCED or SINGLE mode) and we need to restore the
          * original values.
          */
         const unsigned param_count = prog_data->base.base.nr_params;
         uint32_t *param = ralloc_array(NULL, uint32_t, param_count);
         memcpy(param, prog_data->base.base.param,
                sizeof(uint32_t) * param_count);

         if (v.run()) {
            /* Success! The backup is not needed. */
            ralloc_free(param);
            return elk_vec4_generate_assembly(compiler, &params->base,
                                              nir, &prog_data->base,
                                              v.cfg,
                                              v.performance_analysis.require(),
                                              debug_enabled);
         } else {
            /* These variables could be modified by the execution of the GS
             * visitor if it packed the uniforms in the push constant buffer.
             * As it failed, we need to restore them so we can start again
             * with DUAL_INSTANCED or SINGLE mode.
             *
             * FIXME: Could more variables be modified by this execution?
             */
            memcpy(prog_data->base.base.param, param,
                   sizeof(uint32_t) * param_count);
            prog_data->base.base.nr_params = param_count;
            ralloc_free(param);
         }
      }
   }

   /* Either we failed to compile in DUAL_OBJECT mode (probably because it
    * would have required spilling) or DUAL_OBJECT mode is disabled. So fall
    * back to DUAL_INSTANCED or SINGLE mode, which consumes fewer registers.
    *
    * FIXME: Single dispatch mode requires that the driver can handle
    * interleaving of input registers, but this is already supported (dual
    * instance mode has the same requirement). However, to take full advantage
    * of single dispatch mode to reduce register pressure we would also need
    * to do interleaved outputs, but currently, the vec4 visitor and generator
    * classes do not support this, so at the moment register pressure in
    * single and dual instance modes is the same.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 "3DSTATE_GS":
    *
    *    "If InstanceCount>1, DUAL_OBJECT mode is invalid. Software will
    *     likely want to use DUAL_INSTANCE mode for higher performance, but
    *     SINGLE mode is also supported. When InstanceCount=1 (one instance
    *     per object) software can decide which dispatch mode to use.
    *     DUAL_OBJECT mode would likely be the best choice for performance,
    *     followed by SINGLE mode."
    *
    * So SINGLE mode is more performant when invocations == 1 and
    * DUAL_INSTANCE mode is more performant when invocations > 1. Gfx6 only
    * supports SINGLE mode.
    */
   if (prog_data->invocations <= 1 || compiler->devinfo->ver < 7)
      prog_data->base.dispatch_mode = INTEL_DISPATCH_MODE_4X1_SINGLE;
   else
      prog_data->base.dispatch_mode = INTEL_DISPATCH_MODE_4X2_DUAL_INSTANCE;

   elk::vec4_gs_visitor *gs = NULL;
   const unsigned *ret = NULL;

   if (compiler->devinfo->ver >= 7)
      gs = new elk::vec4_gs_visitor(compiler, &params->base, &c, prog_data,
                                    nir, false /* no_spills */,
                                    debug_enabled);
   else
      gs = new elk::gfx6_gs_visitor(compiler, &params->base, &c, prog_data,
                                    nir, false /* no_spills */,
                                    debug_enabled);

   if (!gs->run()) {
      params->base.error_str =
         ralloc_strdup(params->base.mem_ctx, gs->fail_msg);
   } else {
      ret = elk_vec4_generate_assembly(compiler, &params->base, nir,
                                       &prog_data->base, gs->cfg,
                                       gs->performance_analysis.require(),
                                       debug_enabled);
   }

   delete gs;
   return ret;
}