/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_vec4_gs_visitor.cpp
 *
 * Geometry-shader-specific code derived from the vec4_visitor class.
 */

#include "brw_vec4_gs_visitor.h"
#include "gen6_gs_visitor.h"
#include "brw_cfg.h"
#include "brw_fs.h"
#include "brw_nir.h"
#include "common/gen_debug.h"

namespace brw {

vec4_gs_visitor::vec4_gs_visitor(const struct brw_compiler *compiler,
                                 void *log_data,
                                 struct brw_gs_compile *c,
                                 struct brw_gs_prog_data *prog_data,
                                 const nir_shader *shader,
                                 void *mem_ctx,
                                 bool no_spills,
                                 int shader_time_index)
   : vec4_visitor(compiler, log_data, &c->key.tex,
                  &prog_data->base, shader, mem_ctx,
                  no_spills, shader_time_index),
     c(c),
     gs_prog_data(prog_data)
{
}


static inline struct brw_reg
attribute_to_hw_reg(int attr, brw_reg_type type, bool interleaved)
{
   struct brw_reg reg;

   unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(type));
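
   /* For example, with a 32-byte GRF (REG_SIZE), a 4-byte type such as
    * float gives width = 4 (one vec4 per half-register), while an 8-byte
    * type such as double gives width = 2.
    */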
   if (interleaved) {
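      /* Attribute attr lives in GRF attr / 2; the subregister offset
       * (attr % 2) * 4 (in dword units for the float-typed register built
       * here) selects its first or second half.
       */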
      reg = stride(brw_vecn_grf(width, attr / 2, (attr % 2) * 4), 0, width, 1);
   } else {
      reg = brw_vecn_grf(width, attr, 0);
   }

   reg.type = type;
   return reg;
}

/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode). If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
int
vec4_gs_visitor::setup_varying_inputs(int payload_reg,
                                      int attributes_per_reg)
{
   /* For geometry shaders there are N copies of the input attributes, where N
    * is the number of input vertices. attribute_map[BRW_VARYING_SLOT_COUNT *
    * i + j] represents attribute j for vertex i.
    *
    * Note that GS inputs are read from the VUE 256 bits (2 vec4's) at a time,
    * so the total number of input slots that will be delivered to the GS (and
    * thus the stride of the input arrays) is urb_read_length * 2.
    */
   const unsigned num_input_vertices = nir->info.gs.vertices_in;
   assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
   unsigned input_array_stride = prog_data->urb_read_length * 2;
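   /* For example, a triangle-input GS with urb_read_length = 2 has an
    * input_array_stride of 4 slots, so vertex i's inputs begin 4 * i slots
    * into the ATTR file.
    */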

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         assert(inst->src[i].offset % REG_SIZE == 0);
         int grf = payload_reg * attributes_per_reg +
                   inst->src[i].nr + inst->src[i].offset / REG_SIZE;

         struct brw_reg reg =
            attribute_to_hw_reg(grf, inst->src[i].type, attributes_per_reg > 1);
         reg.swizzle = inst->src[i].swizzle;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i] = reg;
      }
   }

   int regs_used = ALIGN(input_array_stride * num_input_vertices,
                         attributes_per_reg) / attributes_per_reg;
   return payload_reg + regs_used;
}

void
vec4_gs_visitor::setup_payload()
{
   /* If we are in dual instanced or single mode, then attributes are going
    * to be interleaved, so one register contains two attribute slots.
    */
   int attributes_per_reg =
      prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT ? 1 : 2;

   int reg = 0;

   /* The payload always contains important data in r0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.
    */
   reg++;

   /* If the shader uses gl_PrimitiveIDIn, that goes in r1. */
   if (gs_prog_data->include_primitive_id)
      reg++;

   reg = setup_uniforms(reg);

   reg = setup_varying_inputs(reg, attributes_per_reg);

   this->first_non_payload_grf = reg;
}


void
vec4_gs_visitor::emit_prolog()
{
   /* In vertex shaders, r0.2 is guaranteed to be initialized to zero. In
    * geometry shaders, it isn't (it contains a bunch of information we don't
    * need, like the input primitive type). We need r0.2 to be zero in order
    * to build scratch read/write messages correctly (otherwise this value
    * will be interpreted as a global offset, causing us to do our scratch
    * reads/writes to garbage memory). So just set it to zero at the top of
    * the shader.
    */
   this->current_annotation = "clear r0.2";
   dst_reg r0(retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(GS_OPCODE_SET_DWORD_2, r0, brw_imm_ud(0u));
   inst->force_writemask_all = true;

   /* Create a virtual register to hold the vertex count */
   this->vertex_count = src_reg(this, glsl_type::uint_type);

   /* Initialize the vertex_count register to 0 */
   this->current_annotation = "initialize vertex_count";
   inst = emit(MOV(dst_reg(this->vertex_count), brw_imm_ud(0u)));
   inst->force_writemask_all = true;

   if (c->control_data_header_size_bits > 0) {
      /* Create a virtual register to hold the current set of control data
       * bits.
       */
      this->control_data_bits = src_reg(this, glsl_type::uint_type);

      /* If we're outputting more than 32 control data bits, then EmitVertex()
       * will set control_data_bits to 0 after emitting the first vertex.
       * Otherwise, we need to initialize it to 0 here.
       */
      if (c->control_data_header_size_bits <= 32) {
         this->current_annotation = "initialize control data bits";
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::emit_thread_end()
{
   if (c->control_data_header_size_bits > 0) {
      /* During shader execution, we only ever call emit_control_data_bits()
       * just prior to outputting a vertex. Therefore, the control data bits
       * corresponding to the most recently output vertex still need to be
       * emitted.
       */
      current_annotation = "thread end: emit control data bits";
      emit_control_data_bits();
   }

   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;

   bool static_vertex_count = gs_prog_data->static_vertex_count != -1;

   /* If the previous instruction was a URB write, we don't need to issue
    * a second one - we can just set the EOT bit on the previous write.
    *
    * Skip this on Gen8+ unless there's a static vertex count, as we also
    * need to write the vertex count out, and combining the two may not be
    * possible (or at least not straightforward).
    */
   vec4_instruction *last = (vec4_instruction *) instructions.get_tail();
   if (last && last->opcode == GS_OPCODE_URB_WRITE &&
       !(INTEL_DEBUG & DEBUG_SHADER_TIME) &&
       devinfo->gen >= 8 && static_vertex_count) {
      last->urb_write_flags = BRW_URB_WRITE_EOT | last->urb_write_flags;
      return;
   }

   current_annotation = "thread end";
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   if (devinfo->gen < 8 || !static_vertex_count)
      emit(GS_OPCODE_SET_VERTEX_COUNT, mrf_reg, this->vertex_count);
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      emit_shader_time_end();
   inst = emit(GS_OPCODE_THREAD_END);
   inst->base_mrf = base_mrf;
   inst->mlen = devinfo->gen >= 8 && !static_vertex_count ? 2 : 1;
}


void
vec4_gs_visitor::emit_urb_write_header(int mrf)
{
   /* The SEND instruction that writes the vertex data to the VUE will use
    * per_slot_offset=true, which means that DWORDs 3 and 4 of the message
    * header specify an offset (in multiples of 256 bits) into the URB entry
    * at which the write should take place.
    *
    * So we have to prepare a message header with the appropriate offset
    * values.
    */
   dst_reg mrf_reg(MRF, mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   this->current_annotation = "URB write header";
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, this->vertex_count,
        brw_imm_ud(gs_prog_data->output_vertex_size_hwords));
}


vec4_instruction *
vec4_gs_visitor::emit_urb_write_opcode(bool complete)
{
   /* We don't care whether the vertex is complete, because in general
    * geometry shaders output multiple vertices, and we don't terminate the
    * thread until all vertices are complete.
    */
   (void) complete;

   vec4_instruction *inst = emit(GS_OPCODE_URB_WRITE);
   inst->offset = gs_prog_data->control_data_header_size_hwords;

   /* We need to increment Global Offset by 1 to make room for Broadwell's
    * extra "Vertex Count" payload at the beginning of the URB entry.
    */
   if (devinfo->gen >= 8 && gs_prog_data->static_vertex_count == -1)
      inst->offset++;

   inst->urb_write_flags = BRW_URB_WRITE_PER_SLOT_OFFSET;
   return inst;
}


/**
 * Write out a batch of 32 control data bits from the control_data_bits
 * register to the URB.
 *
 * The current value of the vertex_count register determines which DWORD in
 * the URB receives the control data bits. The control_data_bits register is
 * assumed to contain the correct data for the vertex that was most recently
 * output, and all previous vertices that share the same DWORD.
 *
 * This function takes care of ensuring that if no vertices have been output
 * yet, no control bits are emitted.
 */
void
vec4_gs_visitor::emit_control_data_bits()
{
   assert(c->control_data_bits_per_vertex != 0);

   /* Since the URB_WRITE_OWORD message operates with 128-bit (vec4 sized)
    * granularity, we need to use two tricks to ensure that the batch of 32
    * control data bits is written to the appropriate DWORD in the URB. To
    * select which vec4 we are writing to, we use the "slot {0,1} offset"
    * fields of the message header. To select which DWORD in the vec4 we are
    * writing to, we use the channel mask fields of the message header. To
    * avoid penalizing geometry shaders that emit a small number of vertices
    * with extra bookkeeping, we only do each of these tricks when
    * c->prog_data.control_data_header_size_bits is large enough to make it
    * necessary.
    *
    * Note: this means that if we're outputting just a single DWORD of control
    * data bits, we'll actually replicate it four times since we won't do any
    * channel masking. But that's not a problem since in this case the
    * hardware only pays attention to the first DWORD.
    */
   enum brw_urb_write_flags urb_write_flags = BRW_URB_WRITE_OWORD;
   if (c->control_data_header_size_bits > 32)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_USE_CHANNEL_MASKS;
   if (c->control_data_header_size_bits > 128)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_PER_SLOT_OFFSET;

   /* If we are using either channel masks or a per-slot offset, then we
    * need to figure out which DWORD we are trying to write to, using the
    * formula:
    *
    *     dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to:
    *
    *     dword_index = (vertex_count - 1) >> (5 - log2(bits_per_vertex))
    *
    * (the code below computes the shift amount as
    * 6 - util_last_bit(bits_per_vertex), which is equivalent since
    * util_last_bit(x) == log2(x) + 1 when x is a power of two).
    */
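   /* For example, with bits_per_vertex = 2 (stream IDs), the 17th vertex
    * (vertex_count = 17) gives dword_index = 16 >> 4 = 1: its two bits land
    * in the second DWORD of the control data header.
    */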
   src_reg dword_index(this, glsl_type::uint_type);
   if (urb_write_flags) {
      src_reg prev_count(this, glsl_type::uint_type);
      emit(ADD(dst_reg(prev_count), this->vertex_count,
               brw_imm_ud(0xffffffffu)));
      unsigned log2_bits_per_vertex =
         util_last_bit(c->control_data_bits_per_vertex);
      emit(SHR(dst_reg(dword_index), prev_count,
               brw_imm_ud(6 - log2_bits_per_vertex)));
   }

   /* Start building the URB write message. The first MRF gets a copy of
    * R0.
    */
   int base_mrf = 1;
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;

   if (urb_write_flags & BRW_URB_WRITE_PER_SLOT_OFFSET) {
      /* Set the per-slot offset to dword_index / 4, so that we'll write to
       * the appropriate OWORD within the control data header.
       */
      src_reg per_slot_offset(this, glsl_type::uint_type);
      emit(SHR(dst_reg(per_slot_offset), dword_index, brw_imm_ud(2u)));
      emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, per_slot_offset,
           brw_imm_ud(1u));
   }

   if (urb_write_flags & BRW_URB_WRITE_USE_CHANNEL_MASKS) {
      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD. We need to do
       * this computation with force_writemask_all, otherwise garbage data
       * from invocation 0 might clobber the mask for invocation 1 when
       * GS_OPCODE_PREPARE_CHANNEL_MASKS tries to OR the two masks
       * together.
       */
      src_reg channel(this, glsl_type::uint_type);
      inst = emit(AND(dst_reg(channel), dword_index, brw_imm_ud(3u)));
      inst->force_writemask_all = true;
      src_reg one(this, glsl_type::uint_type);
      inst = emit(MOV(dst_reg(one), brw_imm_ud(1u)));
      inst->force_writemask_all = true;
      src_reg channel_mask(this, glsl_type::uint_type);
      inst = emit(SHL(dst_reg(channel_mask), one, channel));
      inst->force_writemask_all = true;
      emit(GS_OPCODE_PREPARE_CHANNEL_MASKS, dst_reg(channel_mask),
           channel_mask);
      emit(GS_OPCODE_SET_CHANNEL_MASKS, mrf_reg, channel_mask);
   }

   /* Store the control data bits in the message payload and send it. */
   dst_reg mrf_reg2(MRF, base_mrf + 1);
   inst = emit(MOV(mrf_reg2, this->control_data_bits));
   inst->force_writemask_all = true;
   inst = emit(GS_OPCODE_URB_WRITE);
   inst->urb_write_flags = urb_write_flags;
   /* We need to increment Global Offset by 256-bits to make room for
    * Broadwell's extra "Vertex Count" payload at the beginning of the
    * URB entry. Since this is an OWord message, Global Offset is counted
    * in 128-bit units, so we must set it to 2.
    */
   if (devinfo->gen >= 8 && gs_prog_data->static_vertex_count == -1)
      inst->offset = 2;
   inst->base_mrf = base_mrf;
   inst->mlen = 2;
}

void
vec4_gs_visitor::set_stream_control_data_bits(unsigned stream_id)
{
   /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */

   /* Note: we are calling this *before* increasing vertex_count, so
    * this->vertex_count == vertex_count - 1 in the formula above.
    */

   /* Stream mode uses 2 bits per vertex */
   assert(c->control_data_bits_per_vertex == 2);

   /* Must be a valid stream */
   assert(stream_id < MAX_VERTEX_STREAMS);

   /* Control data bits are initialized to 0 so we don't have to set any
    * bits when sending vertices to stream 0.
    */
   if (stream_id == 0)
      return;

   /* reg::sid = stream_id */
   src_reg sid(this, glsl_type::uint_type);
   emit(MOV(dst_reg(sid), brw_imm_ud(stream_id)));

   /* reg:shift_count = 2 * (vertex_count - 1) */
   src_reg shift_count(this, glsl_type::uint_type);
   emit(SHL(dst_reg(shift_count), this->vertex_count, brw_imm_ud(1u)));

   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
    * stream_id << ((2 * (vertex_count - 1)) % 32).
    */
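   /* For example, emitting the fourth vertex (this->vertex_count == 3) to
    * stream 2 ORs in 2 << 6 = 0x80, setting bits 7:6 of control_data_bits.
    */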
   src_reg mask(this, glsl_type::uint_type);
   emit(SHL(dst_reg(mask), sid, shift_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}

void
vec4_gs_visitor::gs_emit_vertex(int stream_id)
{
   this->current_annotation = "emit vertex: safety check";

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or less, then we can wait
    * until the shader is over to output them all. Otherwise we need to
    * output them as we go. Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (c->control_data_header_size_bits > 32) {
      this->current_annotation = "emit vertex: emit control data bits";
      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits. This is the case when:
       *
       *     (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *     vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to:
       *
       *     vertex_count & (32 / bits_per_vertex - 1) == 0
       */
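      /* For example, with cut bits (bits_per_vertex == 1) the mask below is
       * 31, so a batch is flushed whenever vertex_count is a multiple of 32.
       */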
      vec4_instruction *inst =
         emit(AND(dst_null_ud(), this->vertex_count,
                  brw_imm_ud(32 / c->control_data_bits_per_vertex - 1)));
      inst->conditional_mod = BRW_CONDITIONAL_Z;

      emit(IF(BRW_PREDICATE_NORMAL));
      {
         /* If vertex_count is 0, then no control data bits have been
          * accumulated yet, so we skip emitting them.
          */
         emit(CMP(dst_null_ud(), this->vertex_count, brw_imm_ud(0u),
                  BRW_CONDITIONAL_NEQ));
         emit(IF(BRW_PREDICATE_NORMAL));
         emit_control_data_bits();
         emit(BRW_OPCODE_ENDIF);

         /* Reset control_data_bits to 0 so we can start accumulating a new
          * batch.
          *
          * Note: in the case where vertex_count == 0, this neutralizes the
          * effect of any call to EndPrimitive() that the shader may have
          * made before outputting its first vertex.
          */
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
      emit(BRW_OPCODE_ENDIF);
   }

   this->current_annotation = "emit vertex: vertex data";
   emit_vertex();

   /* In stream mode we have to set control data bits for all vertices
    * unless we have disabled control data bits completely (which we do
    * for GL_POINTS outputs that don't use streams).
    */
   if (c->control_data_header_size_bits > 0 &&
       gs_prog_data->control_data_format ==
          GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
      this->current_annotation = "emit vertex: Stream control data bits";
      set_stream_control_data_bits(stream_id);
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::gs_end_primitive()
{
   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits. Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   if (c->control_data_header_size_bits == 0)
      return;

   /* Cut bits use one bit per vertex. */
   assert(c->control_data_bits_per_vertex == 1);

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise. So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
   src_reg one(this, glsl_type::uint_type);
   emit(MOV(dst_reg(one), brw_imm_ud(1u)));
   src_reg prev_count(this, glsl_type::uint_type);
   emit(ADD(dst_reg(prev_count), this->vertex_count, brw_imm_ud(0xffffffffu)));
   src_reg mask(this, glsl_type::uint_type);
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
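   /* For example, calling EndPrimitive() right after emitting the fifth
    * vertex (vertex_count == 5) sets cut bit 4.
    */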
   emit(SHL(dst_reg(mask), one, prev_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}

static const GLuint gl_prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
   [GL_POINTS] = _3DPRIM_POINTLIST,
   [GL_LINES] = _3DPRIM_LINELIST,
   [GL_LINE_LOOP] = _3DPRIM_LINELOOP,
   [GL_LINE_STRIP] = _3DPRIM_LINESTRIP,
   [GL_TRIANGLES] = _3DPRIM_TRILIST,
   [GL_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
   [GL_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
   [GL_QUADS] = _3DPRIM_QUADLIST,
   [GL_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
   [GL_POLYGON] = _3DPRIM_POLYGON,
   [GL_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
   [GL_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
   [GL_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
   [GL_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};

extern "C" const unsigned *
brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_gs_prog_key *key,
               struct brw_gs_prog_data *prog_data,
               const nir_shader *src_shader,
               struct gl_program *prog,
               int shader_time_index,
               char **error_str)
{
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;

   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_GEOMETRY];
   nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);

   /* The GLSL linker will have already matched up GS inputs and the outputs
    * of prior stages. The driver does extend VS outputs in some cases, but
    * only for legacy OpenGL or Gen4-5 hardware, neither of which offer
    * geometry shader support. So we can safely ignore that.
    *
    * For SSO pipelines, we use a fixed VUE map layout based on variable
    * locations, so we can rely on rendezvous-by-location making this work.
    */
   GLbitfield64 inputs_read = shader->info.inputs_read;
   brw_compute_vue_map(compiler->devinfo,
                       &c.input_vue_map, inputs_read,
                       shader->info.separate_shader);

   shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
   brw_nir_lower_vue_inputs(shader, &c.input_vue_map);
   brw_nir_lower_vue_outputs(shader, is_scalar);
   shader = brw_postprocess_nir(shader, compiler, is_scalar);

   prog_data->base.clip_distance_mask =
      ((1 << shader->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << shader->info.cull_distance_array_size) - 1) <<
         shader->info.clip_distance_array_size;

   prog_data->include_primitive_id =
      (shader->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;

   prog_data->invocations = shader->info.gs.invocations;

   if (compiler->devinfo->gen >= 8)
      prog_data->static_vertex_count = nir_gs_count_vertices(shader);

   if (compiler->devinfo->gen >= 7) {
      if (shader->info.gs.output_primitive == GL_POINTS) {
         /* When the output type is points, the geometry shader may output data
          * to multiple streams, and EndPrimitive() has no effect. So we
          * configure the hardware to interpret the control data as stream ID.
          */
         prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using streams */
         if (prog && prog->info.gs.uses_streams)
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip, EndPrimitive()
          * may be used to terminate the current strip and start a new one
          * (similar to primitive restart), and outputting data to multiple
          * streams is not supported. So we configure the hardware to interpret
          * the control data as EndPrimitive information (a.k.a. "cut bits").
          */
         prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex =
            shader->info.gs.uses_end_primitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gen6. */
      c.control_data_bits_per_vertex = 0;
   }
   c.control_data_header_size_bits =
      shader->info.gs.vertices_out * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   prog_data->control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;
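   /* For example, a shader with vertices_out = 128 and cut bits (1 bit per
    * vertex) needs 128 bits of control data, which rounds up to one 32-byte
    * HWord.
    */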

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only time
    *     software can program a vertex size with an odd number of 16B units
    *     is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB). So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = prog_data->base.vue_map.num_slots * 16;
   assert(compiler->devinfo->gen == 6 ||
          output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   prog_data->output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;
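   /* For example, a VUE map with 7 slots gives 112 bytes, which rounds up to
    * 128 bytes = 4 HWords.
    */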

   /* Compute URB entry size. The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices. So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gen7+ where we have a single URB entry that will hold
    * all the output. In gen6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex. Also, gen6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (compiler->devinfo->gen >= 7) {
      output_size_bytes =
         prog_data->output_vertex_size_hwords * 32 * shader->info.gs.vertices_out;
      output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
   } else {
      output_size_bytes = prog_data->output_vertex_size_hwords * 32;
   }
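   /* For example, on gen7 a shader with vertices_out = 3, a 2-HWord vertex
    * and a 1-HWord control data header needs 3 * 64 + 32 = 224 bytes.
    */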

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (compiler->devinfo->gen >= 8)
      output_size_bytes += 32;

   /* Shaders can technically set max_vertices = 0, at which point we
    * may have a URB size of 0 bytes. Nothing good can come from that,
    * so enforce a minimum size.
    */
   if (output_size_bytes == 0)
      output_size_bytes = 1;

   unsigned max_output_size_bytes = GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (compiler->devinfo->gen == 6)
      max_output_size_bytes = GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return NULL;

   /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
    * a multiple of 128 bytes in gen6.
    */
   if (compiler->devinfo->gen >= 7) {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
      /* On Cannonlake software shall not program an allocation size that
       * specifies a size that is a multiple of 3 64B (512-bit) cachelines.
       */
      if (compiler->devinfo->gen == 10 &&
          prog_data->base.urb_entry_size % 3 == 0)
         prog_data->base.urb_entry_size++;
   } else {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
   }
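   /* For example, on gen7 output_size_bytes = 224 is programmed as
    * ALIGN(224, 64) / 64 = 4 64-byte units; on gen10 a size of exactly 3
    * units would be bumped to 4 by the workaround above.
    */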

   assert(shader->info.gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
   prog_data->output_topology =
      gl_prim_to_hw_prim[shader->info.gs.output_primitive];

   prog_data->vertices_in = shader->info.gs.vertices_in;

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   prog_data->base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;
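   /* For example, an input VUE map with 5 slots programs a read length of
    * (5 + 1) / 2 = 3.
    */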

   /* Now that prog_data setup is done, we are ready to actually compile the
    * program.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
      fprintf(stderr, "GS Input ");
      brw_print_vue_map(stderr, &c.input_vue_map);
      fprintf(stderr, "GS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map);
   }

   if (is_scalar) {
      fs_visitor v(compiler, log_data, mem_ctx, &c, prog_data, shader,
                   shader_time_index);
      if (v.run_gs()) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
         prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

         fs_generator g(compiler, log_data, mem_ctx, &c.key,
                        &prog_data->base.base, v.promoted_constants,
                        false, MESA_SHADER_GEOMETRY);
         if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
            const char *label =
               shader->info.label ? shader->info.label : "unnamed";
            char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
                                         label, shader->info.name);
            g.enable_debug(name);
         }
         g.generate_code(v.cfg, 8);
         return g.get_assembly(&prog_data->base.base.program_size);
      }
   }

   if (compiler->devinfo->gen >= 7) {
      /* Compile the geometry shader in DUAL_OBJECT dispatch mode, if we can do
       * so without spilling. If the GS invocations count > 1, then we can't use
       * dual object mode.
       */
      if (prog_data->invocations <= 1 &&
          likely(!(INTEL_DEBUG & DEBUG_NO_DUAL_OBJECT_GS))) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

         vec4_gs_visitor v(compiler, log_data, &c, prog_data, shader,
                           mem_ctx, true /* no_spills */, shader_time_index);

         /* Back up 'nr_params' and 'param', as they can be modified by the
          * DUAL_OBJECT visitor. If it fails, we will run the fallback
          * (DUAL_INSTANCED or SINGLE mode) and we need to restore the
          * original values.
          */
         const unsigned param_count = prog_data->base.base.nr_params;
         uint32_t *param = ralloc_array(NULL, uint32_t, param_count);
         memcpy(param, prog_data->base.base.param,
                sizeof(uint32_t) * param_count);

         if (v.run()) {
            /* Success! Backup is not needed */
            ralloc_free(param);
            return brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                              shader, &prog_data->base, v.cfg,
                                              &prog_data->base.base.
                                                 program_size);
         } else {
            /* These variables could have been modified by the execution of
             * the GS visitor if it packed the uniforms in the push constant
             * buffer. Since it failed, we need to restore them so we can
             * start again with DUAL_INSTANCED or SINGLE mode.
             *
             * FIXME: Could more variables be modified by this execution?
             */
            memcpy(prog_data->base.base.param, param,
                   sizeof(uint32_t) * param_count);
            prog_data->base.base.nr_params = param_count;
            prog_data->base.base.nr_pull_params = 0;
            ralloc_free(param);
         }
      }
   }

   /* Either we failed to compile in DUAL_OBJECT mode (probably because it
    * would have required spilling) or DUAL_OBJECT mode is disabled. So fall
    * back to DUAL_INSTANCED or SINGLE mode, which consumes fewer registers.
    *
    * FIXME: Single dispatch mode requires that the driver can handle
    * interleaving of input registers, but this is already supported (dual
    * instance mode has the same requirement). However, to take full advantage
    * of single dispatch mode to reduce register pressure we would also need to
    * do interleaved outputs, but currently, the vec4 visitor and generator
    * classes do not support this, so at the moment register pressure in
    * single and dual instance modes is the same.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 "3DSTATE_GS":
    * "If InstanceCount>1, DUAL_OBJECT mode is invalid. Software will likely
    * want to use DUAL_INSTANCE mode for higher performance, but SINGLE mode
    * is also supported. When InstanceCount=1 (one instance per object) software
    * can decide which dispatch mode to use. DUAL_OBJECT mode would likely be
    * the best choice for performance, followed by SINGLE mode."
    *
    * So SINGLE mode is more performant when invocations == 1 and DUAL_INSTANCE
    * mode is more performant when invocations > 1. Gen6 only supports
    * SINGLE mode.
    */
   if (prog_data->invocations <= 1 || compiler->devinfo->gen < 7)
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X1_SINGLE;
   else
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_INSTANCE;

   vec4_gs_visitor *gs = NULL;
   const unsigned *ret = NULL;

   if (compiler->devinfo->gen >= 7)
      gs = new vec4_gs_visitor(compiler, log_data, &c, prog_data,
                               shader, mem_ctx, false /* no_spills */,
                               shader_time_index);
   else
      gs = new gen6_gs_visitor(compiler, log_data, &c, prog_data, prog,
                               shader, mem_ctx, false /* no_spills */,
                               shader_time_index);

   if (!gs->run()) {
      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, gs->fail_msg);
   } else {
      ret = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, shader,
                                       &prog_data->base, gs->cfg,
                                       &prog_data->base.base.program_size);
   }

   delete gs;
   return ret;
}


} /* namespace brw */