/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "ac_llvm_cull.h"
#include "si_pipe.h"
#include "si_shader_internal.h"
#include "sid.h"
#include "util/u_memory.h"
#include "util/u_prim.h"

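/* The merged_wave_info SGPR packs the wave index within the threadgroup in
 * bits [24:28) and the threadgroup wave count in bits [28:32); the two
 * helpers below unpack those fields.
 */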
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
                      LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
   return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

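/* The gs_tg_info SGPR packs the streamout ordered ID in bits [0:12), the
 * threadgroup vertex count in bits [12:21), and the threadgroup primitive
 * count in bits [22:31).
 */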
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}

static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

   return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
                                LLVMConstInt(ctx->ac.i32, GFX10_GS_QUERY_BUF, false));
}

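/* For an API VS, the initial edge flags of the primitive's three vertices
 * arrive packed in bits [8:11) of the gs_invocation_id VGPR; TES primitives
 * have no initial edge flags.
 */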
static LLVMValueRef ngg_get_initial_edgeflag(struct si_shader_context *ctx, unsigned index)
{
   if (ctx->stage == MESA_SHADER_VERTEX) {
      LLVMValueRef tmp;
      tmp = LLVMBuildLShr(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
                          LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
      return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
   }
   return ctx->ac.i1false;
}

/**
 * Return the maximum number of vertices as a constant in \p num_vertices,
 * and return the exact (possibly non-constant) value as an LLVMValueRef
 * from the function.
 */
static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx, unsigned *num_vertices)
{
   const struct si_shader_info *info = &ctx->shader->selector->info;

   if (ctx->stage == MESA_SHADER_VERTEX) {
      if (info->base.vs.blit_sgprs_amd) {
         /* Blits always use axis-aligned rectangles with 3 vertices. */
         *num_vertices = 3;
         return LLVMConstInt(ctx->ac.i32, 3, 0);
      } else {
         /* We always build up all three indices for the prim export
          * independent of the primitive type. The additional garbage
          * data shouldn't hurt. This number doesn't matter with
          * NGG passthrough.
          */
         *num_vertices = 3;

         /* Extract OUTPRIM field. */
         LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
         return LLVMBuildAdd(ctx->ac.builder, num, ctx->ac.i32_1, "");
      }
   } else {
      assert(ctx->stage == MESA_SHADER_TESS_EVAL);

      if (info->base.tess.point_mode)
         *num_vertices = 1;
      else if (info->base.tess.primitive_mode == GL_LINES)
         *num_vertices = 2;
      else
         *num_vertices = 3;

      return LLVMConstInt(ctx->ac.i32, *num_vertices, false);
   }
}

bool gfx10_ngg_export_prim_early(struct si_shader *shader)
{
   struct si_shader_selector *sel = shader->selector;

   assert(shader->key.as_ngg && !shader->key.as_es);

   return sel->info.stage != MESA_SHADER_GEOMETRY && !sel->info.writes_edgeflag;
}

void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx)
{
   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), ngg_get_vtx_cnt(ctx),
                                 ngg_get_prim_cnt(ctx));
}

void gfx10_ngg_build_export_prim(struct si_shader_context *ctx, LLVMValueRef user_edgeflags[3],
                                 LLVMValueRef prim_passthrough)
{
   LLVMBuilderRef builder = ctx->ac.builder;

   if (gfx10_is_ngg_passthrough(ctx->shader) || ctx->shader->key.opt.ngg_culling) {
      ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
      {
         struct ac_ngg_prim prim = {};

         if (prim_passthrough)
            prim.passthrough = prim_passthrough;
         else
            prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);

         /* This is only used with NGG culling, which returns the NGG
          * passthrough prim export encoding.
          */
         if (ctx->shader->selector->info.writes_edgeflag) {
            unsigned all_bits_no_edgeflags = ~SI_NGG_PRIM_EDGE_FLAG_BITS;
            LLVMValueRef edgeflags = LLVMConstInt(ctx->ac.i32, all_bits_no_edgeflags, 0);

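            /* Start from a mask with the edge-flag bits (9, 19, 29) cleared,
             * OR each user edge flag into its bit, then AND with the
             * passthrough word: a user flag of 0 clears that edge-flag bit.
             */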
            unsigned num_vertices;
            ngg_get_vertices_per_prim(ctx, &num_vertices);

            for (unsigned i = 0; i < num_vertices; i++) {
               unsigned shift = 9 + i * 10;
               LLVMValueRef edge;

               edge = LLVMBuildLoad(builder, user_edgeflags[i], "");
               edge = LLVMBuildZExt(builder, edge, ctx->ac.i32, "");
               edge = LLVMBuildShl(builder, edge, LLVMConstInt(ctx->ac.i32, shift, 0), "");
               edgeflags = LLVMBuildOr(builder, edgeflags, edge, "");
            }
            prim.passthrough = LLVMBuildAnd(builder, prim.passthrough, edgeflags, "");
         }

         ac_build_export_prim(&ctx->ac, &prim);
      }
      ac_build_endif(&ctx->ac, 6001);
      return;
   }

   ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
   {
      struct ac_ngg_prim prim = {};

      ngg_get_vertices_per_prim(ctx, &prim.num_vertices);

      prim.isnull = ctx->ac.i1false;
      prim.index[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
      prim.index[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
      prim.index[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);

      for (unsigned i = 0; i < prim.num_vertices; ++i) {
         prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);

         if (ctx->shader->selector->info.writes_edgeflag) {
            LLVMValueRef edge;

            edge = LLVMBuildLoad(ctx->ac.builder, user_edgeflags[i], "");
            edge = LLVMBuildAnd(ctx->ac.builder, prim.edgeflag[i], edge, "");
            prim.edgeflag[i] = edge;
         }
      }

      ac_build_export_prim(&ctx->ac, &prim);
   }
   ac_build_endif(&ctx->ac, 6001);
}

static void build_streamout_vertex(struct si_shader_context *ctx, LLVMValueRef *so_buffer,
                                   LLVMValueRef *wg_offset_dw, unsigned stream,
                                   LLVMValueRef offset_vtx, LLVMValueRef vertexptr)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef offset[4] = {};
   LLVMValueRef tmp;

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (!wg_offset_dw[buffer])
         continue;

      tmp = LLVMBuildMul(builder, offset_vtx, LLVMConstInt(ctx->ac.i32, so->stride[buffer], false),
                         "");
      tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
      offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
   }

   for (unsigned i = 0; i < so->num_outputs; ++i) {
      if (so->output[i].stream != stream)
         continue;

      unsigned reg = so->output[i].register_index;
      struct si_shader_output_values out;
      out.semantic = info->output_semantic[reg];

      for (unsigned comp = 0; comp < 4; comp++) {
         tmp = ac_build_gep0(&ctx->ac, vertexptr, LLVMConstInt(ctx->ac.i32, 4 * reg + comp, false));
         out.values[comp] = LLVMBuildLoad(builder, tmp, "");
         out.vertex_stream[comp] = (info->output_streams[reg] >> (2 * comp)) & 3;
      }

      si_llvm_streamout_store_output(ctx, so_buffer, offset, &so->output[i], &out);
   }
}

struct ngg_streamout {
   LLVMValueRef num_vertices;

   /* per-thread data */
   LLVMValueRef prim_enable[4]; /* i1 per stream */
   LLVMValueRef vertices[3];    /* [N x i32] addrspace(LDS)* */

   /* Output */
   LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx, struct ngg_streamout *nggso)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp, tmp2;
   LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
   LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
   LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
   LLVMValueRef so_buffer[4] = {};
   unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) + (nggso->vertices[2] ? 1 : 0);
   LLVMValueRef prim_stride_dw[4] = {};
   LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
   int stream_for_buffer[4] = {-1, -1, -1, -1};
   unsigned bufmask_for_stream[4] = {};
   bool isgs = ctx->stage == MESA_SHADER_GEOMETRY;
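   /* gs_ngg_scratch layout used here (dwords): for GS, [0:4) holds the
    * per-stream generated-primitive counts, [4:8) receives the per-stream
    * emit counts and [8:12) the per-buffer streamout offsets. For VS/TES,
    * emit counts go to [0:4) and offsets to [4:8).
    */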
   unsigned scratch_emit_base = isgs ? 4 : 0;
   LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
   unsigned scratch_offset_base = isgs ? 8 : 4;
   LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

   ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

   /* Determine the mapping of streamout buffers to vertex streams. */
   for (unsigned i = 0; i < so->num_outputs; ++i) {
      unsigned buf = so->output[i].output_buffer;
      unsigned stream = so->output[i].stream;
      assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
      stream_for_buffer[buf] = stream;
      bufmask_for_stream[stream] |= 1 << buf;
   }

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (stream_for_buffer[buffer] == -1)
         continue;

      assert(so->stride[buffer]);

      tmp = LLVMConstInt(ctx->ac.i32, so->stride[buffer], false);
      prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
      prim_stride_dw_vgpr =
         ac_build_writelane(&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
                            LLVMConstInt(ctx->ac.i32, buffer, false));

      so_buffer[buffer] = ac_build_load_to_sgpr(
         &ctx->ac, buf_ptr, LLVMConstInt(ctx->ac.i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
   }

   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
   ac_build_ifcc(&ctx->ac, tmp, 5200);
   {
      LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
      LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");

      /* Advance the streamout offsets in GDS. */
      LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
      LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5210);
      {
         if (isgs) {
            tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
            tmp = LLVMBuildLoad(builder, tmp, "");
         } else {
            tmp = ac_build_writelane(&ctx->ac, ctx->ac.i32_0, ngg_get_prim_cnt(ctx), ctx->ac.i32_0);
         }
         LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

         unsigned swizzle[4];
         int unused_stream = -1;
         for (unsigned stream = 0; stream < 4; ++stream) {
            if (!info->num_stream_output_components[stream]) {
               unused_stream = stream;
               break;
            }
         }
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] >= 0) {
               swizzle[buffer] = stream_for_buffer[buffer];
            } else {
               assert(unused_stream >= 0);
               swizzle[buffer] = unused_stream;
            }
         }

         tmp = ac_build_quad_swizzle(&ctx->ac, tmp, swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
         tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

         LLVMValueRef args[] = {
            LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
            tmp,
            ctx->ac.i32_0,                             // ordering
            ctx->ac.i32_0,                             // scope
            ctx->ac.i1false,                           // isVolatile
            LLVMConstInt(ctx->ac.i32, 4 << 24, false), // OA index
            ctx->ac.i1true,                            // wave release
            ctx->ac.i1true,                            // wave done
         };
         tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32, args,
                                  ARRAY_SIZE(args), 0);

         /* Keep offsets in a VGPR for quick retrieval via readlane by
          * the first wave for bounds checking, and also store in LDS
          * for retrieval by all waves later. */
         LLVMBuildStore(builder, tmp, offsets_vgpr);

         tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac), scratch_offset_basev, "");
         tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
         LLVMBuildStore(builder, tmp, tmp2);
      }
      ac_build_endif(&ctx->ac, 5210);

      /* Determine the max emit per buffer. This is done via the SALU, in part
       * because LLVM can't generate divide-by-multiply if we try to do this
       * via VALU with one lane per buffer.
       */
      LLVMValueRef max_emit[4] = {};
      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] == -1)
            continue;

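         /* Dword 2 of the streamout buffer descriptor holds the buffer size
          * in bytes; shift right by 2 to get the size in dwords.
          */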
         LLVMValueRef bufsize_dw = LLVMBuildLShr(
            builder, LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""), i32_2, "");

         tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
         LLVMValueRef offset_dw =
            ac_build_readlane(&ctx->ac, tmp, LLVMConstInt(ctx->ac.i32, buffer, false));

         tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
         tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
         max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->ac.i32_0, tmp, "");
      }

      /* Determine the number of emitted primitives per stream and fixup the
       * GDS counter if necessary.
       *
       * This is complicated by the fact that a single stream can emit to
       * multiple buffers (but luckily not vice versa).
       */
      LLVMValueRef emit_vgpr = ctx->ac.i32_0;

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
         LLVMValueRef generated =
            ac_build_readlane(&ctx->ac, tmp, LLVMConstInt(ctx->ac.i32, stream, false));

         LLVMValueRef emit = generated;
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] == stream)
               emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
         }

         emit_vgpr =
            ac_build_writelane(&ctx->ac, emit_vgpr, emit, LLVMConstInt(ctx->ac.i32, stream, false));

         /* Fixup the offset using a plain GDS atomic if we overflowed. */
         tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
         ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
         tmp = LLVMBuildLShr(builder, LLVMConstInt(ctx->ac.i32, bufmask_for_stream[stream], false),
                             ac_get_thread_id(&ctx->ac), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
         ac_build_ifcc(&ctx->ac, tmp, 5222);
         {
            tmp = LLVMBuildSub(builder, generated, emit, "");
            tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
            tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
            LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
                               LLVMAtomicOrderingMonotonic, false);
         }
         ac_build_endif(&ctx->ac, 5222);
         ac_build_endif(&ctx->ac, 5221);
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5225);
      {
         tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac), scratch_emit_basev, "");
         tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
         LLVMBuildStore(builder, emit_vgpr, tmp);
      }
      ac_build_endif(&ctx->ac, 5225);
   }
   ac_build_endif(&ctx->ac, 5200);

   /* Determine the workgroup-relative per-thread / primitive offset into
    * the streamout buffers. */
   struct ac_wg_scan primemit_scan[4] = {};

   if (isgs) {
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         primemit_scan[stream].enable_exclusive = true;
         primemit_scan[stream].op = nir_op_iadd;
         primemit_scan[stream].src = nggso->prim_enable[stream];
         primemit_scan[stream].scratch = ac_build_gep0(
            &ctx->ac, ctx->gs_ngg_scratch, LLVMConstInt(ctx->ac.i32, 12 + 8 * stream, false));
         primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
         primemit_scan[stream].numwaves = get_tgsize(ctx);
         primemit_scan[stream].maxwaves = 8;
         ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
      }
   }

   ac_build_s_barrier(&ctx->ac);

   /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
   LLVMValueRef wgoffset_dw[4] = {};

   {
      LLVMValueRef scratch_vgpr;

      tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
      scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] >= 0) {
            wgoffset_dw[buffer] =
               ac_build_readlane(&ctx->ac, scratch_vgpr,
                                 LLVMConstInt(ctx->ac.i32, scratch_offset_base + buffer, false));
         }
      }

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (info->num_stream_output_components[stream]) {
            nggso->emit[stream] =
               ac_build_readlane(&ctx->ac, scratch_vgpr,
                                 LLVMConstInt(ctx->ac.i32, scratch_emit_base + stream, false));
         }
      }
   }

   /* Write out primitive data */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      if (isgs) {
         ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
      } else {
         primemit_scan[stream].result_exclusive = tid;
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT, primemit_scan[stream].result_exclusive,
                          nggso->emit[stream], "");
      tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
      ac_build_ifcc(&ctx->ac, tmp, 5240);
      {
         LLVMValueRef offset_vtx =
            LLVMBuildMul(builder, primemit_scan[stream].result_exclusive, nggso->num_vertices, "");

         for (unsigned i = 0; i < max_num_vertices; ++i) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT, LLVMConstInt(ctx->ac.i32, i, false),
                                nggso->num_vertices, "");
            ac_build_ifcc(&ctx->ac, tmp, 5241);
            build_streamout_vertex(ctx, so_buffer, wgoffset_dw, stream, offset_vtx,
                                   nggso->vertices[i]);
            ac_build_endif(&ctx->ac, 5241);
            offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->ac.i32_1, "");
         }
      }
      ac_build_endif(&ctx->ac, 5240);
   }
}

/* LDS layout of ES vertex data for NGG culling. */
enum
{
   /* Byte 0: Boolean ES thread accepted (unculled) flag, and later the old
    *         ES thread ID. After vertex compaction, compacted ES threads
    *         store the old thread ID here to copy input VGPRs from uncompacted
    *         ES threads.
    * Byte 1: New ES thread ID, loaded by GS to prepare the prim export value.
    * Byte 2: TES rel patch ID
    * Byte 3: Unused
    */
   lds_byte0_accept_flag = 0,
   lds_byte1_new_thread_id,
   lds_byte2_tes_rel_patch_id,
   lds_byte3_unused,

   lds_packed_data = 0, /* lds_byteN_... */
   lds_pos_cull_x_div_w,
   lds_pos_cull_y_div_w,
   lds_pos_cull_w,

   lds_pos_x = lds_packed_data + 1,
   lds_pos_y,
   lds_pos_z,
   lds_pos_w,
   /* If VS: */
   lds_vertex_id,
   lds_instance_id, /* optional */
   /* If TES: */
   lds_tes_u = lds_vertex_id,
   lds_tes_v = lds_instance_id,
   lds_tes_patch_id, /* optional */
};
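/* Note that lds_pos_x.. overlaps the lds_pos_cull_* slots: the cull-only
 * values are needed only before vertex compaction, after which the full
 * position is stored over them.
 */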

static LLVMValueRef si_build_gep_i8(struct si_shader_context *ctx, LLVMValueRef ptr,
                                    unsigned byte_index)
{
   assert(byte_index < 4);
   LLVMTypeRef pi8 = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
   LLVMValueRef index = LLVMConstInt(ctx->ac.i32, byte_index, 0);

   return LLVMBuildGEP(ctx->ac.builder, LLVMBuildPointerCast(ctx->ac.builder, ptr, pi8, ""), &index,
                       1, "");
}

static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
   unsigned lds_vertex_size = 0;

   /* The edgeflag is always stored in the last element that's also
    * used for padding to reduce LDS bank conflicts. */
   if (shader->selector->so.num_outputs)
      lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
   if (shader->selector->info.writes_edgeflag)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   /* LDS size for passing data from GS to ES.
    * GS stores Primitive IDs into LDS at the address corresponding
    * to the ES thread of the provoking vertex. All ES threads
    * load and export PrimitiveID for their thread.
    */
   if (shader->selector->info.stage == MESA_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   if (shader->key.opt.ngg_culling) {
      if (shader->selector->info.stage == MESA_SHADER_VERTEX) {
         STATIC_ASSERT(lds_instance_id + 1 == 7);
         lds_vertex_size = MAX2(lds_vertex_size, 7);
      } else {
         assert(shader->selector->info.stage == MESA_SHADER_TESS_EVAL);

         if (shader->selector->info.uses_primid || shader->key.mono.u.vs_export_prim_id) {
            STATIC_ASSERT(lds_tes_patch_id + 2 == 9); /* +1 for LDS padding */
            lds_vertex_size = MAX2(lds_vertex_size, 9);
         } else {
            STATIC_ASSERT(lds_tes_v + 1 == 7);
            lds_vertex_size = MAX2(lds_vertex_size, 7);
         }
      }
   }

   return lds_vertex_size;
}

/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vtxid)
{
   /* The extra dword is used to avoid LDS bank conflicts. */
   unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
   LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, vertex_size);
   LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
   LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
   return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}

static LLVMValueRef si_insert_input_v4i32(struct si_shader_context *ctx, LLVMValueRef ret,
                                          struct ac_arg param, unsigned return_index)
{
   LLVMValueRef v = ac_get_arg(&ctx->ac, param);

   for (unsigned i = 0; i < 4; i++) {
      ret = LLVMBuildInsertValue(ctx->ac.builder, ret, ac_llvm_extract_elem(&ctx->ac, v, i),
                                 return_index + i, "");
   }
   return ret;
}

static void load_bitmasks_2x64(struct si_shader_context *ctx, LLVMValueRef lds_ptr,
                               LLVMValueRef tid,
                               unsigned dw_offset, LLVMValueRef mask[2],
                               LLVMValueRef *total_bitcount)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef ptr64 = LLVMBuildPointerCast(
      builder, lds_ptr, LLVMPointerType(LLVMArrayType(ctx->ac.i64, 2), AC_ADDR_SPACE_LDS), "");
   LLVMValueRef tmp[2];

   for (unsigned i = 0; i < 2; i++)
      tmp[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i64, "");

   /* If all threads loaded the bitmasks, it would cause many LDS bank conflicts
    * and the performance could decrease up to WaveSize times (32x or 64x).
    *
    * Therefore, only load the bitmasks in thread 0; other threads get them
    * through readlane.
    */
   ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntEQ, tid, ctx->ac.i32_0, ""), 17771);
   for (unsigned i = 0; i < 2; i++) {
      LLVMValueRef index = LLVMConstInt(ctx->ac.i32, dw_offset / 2 + i, 0);
      LLVMValueRef val = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ptr64, index), "");
      LLVMBuildStore(builder, val, tmp[i]);
   }
   ac_build_endif(&ctx->ac, 17771);

   *total_bitcount = ctx->ac.i32_0;

   for (unsigned i = 0; i < 2; i++) {
      tmp[i] = LLVMBuildLoad(builder, tmp[i], "");
      mask[i] = ac_build_readlane_no_opt_barrier(&ctx->ac, tmp[i], NULL);

      *total_bitcount = LLVMBuildAdd(builder, *total_bitcount,
                                     ac_build_bit_count(&ctx->ac, mask[i]), "");
   }
}

/**
 * Given a total thread count, update total and per-wave thread counts in input SGPRs
 * and return the per-wave thread count.
 *
 * \param new_num_threads    Total thread count on the input, per-wave thread count on the output.
 * \param tg_info            tg_info SGPR value
 * \param tg_info_num_bits   the bit size of thread count field in tg_info
 * \param tg_info_shift      the bit offset of the thread count field in tg_info
 * \param wave_info          merged_wave_info SGPR value
 * \param wave_info_num_bits the bit size of thread count field in merged_wave_info
 * \param wave_info_shift    the bit offset of the thread count field in merged_wave_info
 */
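/* E.g. the culling epilogue below calls this as
 *    update_thread_counts(ctx, &n, &tg_info, 9, 12, &wave_info, 8, 0);
 * i.e. the total thread count lives in a 9-bit field at bit 12 of gs_tg_info,
 * and the per-wave count in an 8-bit field at bit 0 of merged_wave_info.
 */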
static void update_thread_counts(struct si_shader_context *ctx, LLVMValueRef *new_num_threads,
                                 LLVMValueRef *tg_info, unsigned tg_info_num_bits,
                                 unsigned tg_info_shift, LLVMValueRef *wave_info,
                                 unsigned wave_info_num_bits, unsigned wave_info_shift)
{
   LLVMBuilderRef builder = ctx->ac.builder;

   /* Update the total thread count. */
   unsigned tg_info_mask = ~(u_bit_consecutive(0, tg_info_num_bits) << tg_info_shift);
   *tg_info = LLVMBuildAnd(builder, *tg_info, LLVMConstInt(ctx->ac.i32, tg_info_mask, 0), "");
   *tg_info = LLVMBuildOr(
      builder, *tg_info,
      LLVMBuildShl(builder, *new_num_threads, LLVMConstInt(ctx->ac.i32, tg_info_shift, 0), ""), "");

   /* Update the per-wave thread count. */
   LLVMValueRef prev_threads = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
                                            LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), "");
   *new_num_threads = LLVMBuildSub(builder, *new_num_threads, prev_threads, "");
   *new_num_threads = ac_build_imax(&ctx->ac, *new_num_threads, ctx->ac.i32_0);
   *new_num_threads =
      ac_build_imin(&ctx->ac, *new_num_threads, LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0));
   unsigned wave_info_mask = ~(u_bit_consecutive(0, wave_info_num_bits) << wave_info_shift);
   *wave_info = LLVMBuildAnd(builder, *wave_info, LLVMConstInt(ctx->ac.i32, wave_info_mask, 0), "");
   *wave_info = LLVMBuildOr(
      builder, *wave_info,
      LLVMBuildShl(builder, *new_num_threads, LLVMConstInt(ctx->ac.i32, wave_info_shift, 0), ""),
      "");
}

/**
 * Cull primitives for NGG VS or TES, then compact vertices, which happens
 * before the VS or TES main function. Return values for the main function.
 * Also return the position, which is passed to the shader as an input,
 * so that we don't compute it twice.
 */
void gfx10_emit_ngg_culling_epilogue(struct ac_shader_abi *abi, unsigned max_outputs,
                                     LLVMValueRef *addrs)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   struct si_shader *shader = ctx->shader;
   struct si_shader_selector *sel = shader->selector;
   struct si_shader_info *info = &sel->info;
   LLVMBuilderRef builder = ctx->ac.builder;
   unsigned max_waves = ctx->ac.wave_size == 64 ? 2 : 4;
   LLVMValueRef ngg_scratch = ctx->gs_ngg_scratch;

   if (ctx->ac.wave_size == 64) {
      ngg_scratch = LLVMBuildPointerCast(builder, ngg_scratch,
                                         LLVMPointerType(LLVMArrayType(ctx->ac.i64, max_waves),
                                                         AC_ADDR_SPACE_LDS), "");
   }

   assert(shader->key.opt.ngg_culling);
   assert(shader->key.as_ngg);
   assert(sel->info.stage == MESA_SHADER_VERTEX ||
          (sel->info.stage == MESA_SHADER_TESS_EVAL && !shader->key.as_es));

   LLVMValueRef es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
   unsigned pos_index = 0;

   for (unsigned i = 0; i < info->num_outputs; i++) {
      LLVMValueRef position[4];

      switch (info->output_semantic[i]) {
      case VARYING_SLOT_POS:
         pos_index = i;
         for (unsigned j = 0; j < 4; j++) {
            position[j] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + j], "");
         }

         /* Store Position.W into LDS. */
         LLVMBuildStore(
            builder, ac_to_integer(&ctx->ac, position[3]),
            ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_pos_cull_w, 0)));

         /* Store Position.XY / W into LDS. */
         for (unsigned chan = 0; chan < 2; chan++) {
            LLVMValueRef val = ac_build_fdiv(&ctx->ac, position[chan], position[3]);
            LLVMBuildStore(
               builder, ac_to_integer(&ctx->ac, val),
               ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_pos_cull_x_div_w + chan, 0)));
         }
         break;
      }
   }

   /* Initialize the packed data. */
   LLVMBuildStore(
      builder, ctx->ac.i32_0,
      ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_packed_data, 0)));
   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

   /* Initialize all but the first element of ngg_scratch to 0, because we may have fewer
    * than the maximum number of waves, but we always read all values. This is where
    * the thread bitmasks of unculled threads will be stored.
    *
    * ngg_scratch layout: iN_wavemask esmask[0..n]
    */
   ac_build_ifcc(&ctx->ac,
                 LLVMBuildICmp(builder, LLVMIntULT, get_thread_id_in_tg(ctx),
                               LLVMConstInt(ctx->ac.i32, max_waves - 1, 0), ""),
                 16101);
   {
      LLVMValueRef index = LLVMBuildAdd(builder, tid, ctx->ac.i32_1, "");
      LLVMBuildStore(builder, LLVMConstInt(ctx->ac.iN_wavemask, 0, 0),
                     ac_build_gep0(&ctx->ac, ngg_scratch, index));
   }
   ac_build_endif(&ctx->ac, 16101);
   ac_build_s_barrier(&ctx->ac);

   /* The hardware requires that there are no holes between unculled vertices,
    * which means we have to pack ES threads, i.e. reduce the ES thread count
    * and move ES input VGPRs to lower threads. The upside is that varyings
    * are only fetched and computed for unculled vertices.
    *
    * Vertex compaction in GS threads:
    *
    * Part 1: Compute the surviving vertex mask in GS threads:
    * - Compute 4 32-bit surviving vertex masks in LDS. (max 4 waves)
    *   - In GS, notify ES threads whether the vertex survived.
    *   - Barrier
    *   - ES threads will create the mask and store it in LDS.
    * - Barrier
    * - Each GS thread loads the vertex masks from LDS.
    *
    * Part 2: Compact ES threads in GS threads:
    * - Compute the prefix sum for all 3 vertices from the masks. These are the new
    *   thread IDs for each vertex within the primitive.
    * - Write the value of the old thread ID into the LDS address of the new thread ID.
    *   The ES thread will load the old thread ID and use it to load the position, VertexID,
    *   and InstanceID.
    * - Update vertex indices and null flag in the GS input VGPRs.
    * - Barrier
    *
    * Part 3: Update input GPRs:
    * - For all waves, update per-wave thread counts in input SGPRs.
    * - In ES threads, update the ES input VGPRs (VertexID, InstanceID, TES inputs).
    */

   LLVMValueRef vtxindex[3];
   if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL) {
      /* For the GS fast launch, the VS prolog simply puts the Vertex IDs
       * into these VGPRs.
       */
      vtxindex[0] = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
      vtxindex[1] = ac_get_arg(&ctx->ac, ctx->gs_vtx23_offset);
      vtxindex[2] = ac_get_arg(&ctx->ac, ctx->gs_vtx45_offset);
   } else {
      vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
      vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
      vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
   }
   LLVMValueRef gs_vtxptr[] = {
      ngg_nogs_vertex_ptr(ctx, vtxindex[0]),
      ngg_nogs_vertex_ptr(ctx, vtxindex[1]),
      ngg_nogs_vertex_ptr(ctx, vtxindex[2]),
   };
   es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

   LLVMValueRef gs_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");

   /* Do culling in GS threads. */
   ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 16002);
   {
      /* Load positions. */
      LLVMValueRef pos[3][4] = {};
      for (unsigned vtx = 0; vtx < 3; vtx++) {
         for (unsigned chan = 0; chan < 4; chan++) {
            unsigned index;
            if (chan == 0 || chan == 1)
               index = lds_pos_cull_x_div_w + chan;
            else if (chan == 3)
               index = lds_pos_cull_w;
            else
               continue;

            LLVMValueRef addr =
               ac_build_gep0(&ctx->ac, gs_vtxptr[vtx], LLVMConstInt(ctx->ac.i32, index, 0));
            pos[vtx][chan] = LLVMBuildLoad(builder, addr, "");
            pos[vtx][chan] = ac_to_float(&ctx->ac, pos[vtx][chan]);
         }
      }

      /* Load the viewport state for small prim culling. */
      LLVMValueRef vp = ac_build_load_invariant(
         &ctx->ac, ac_get_arg(&ctx->ac, ctx->small_prim_cull_info), ctx->ac.i32_0);
      vp = LLVMBuildBitCast(builder, vp, ctx->ac.v4f32, "");
      LLVMValueRef vp_scale[2], vp_translate[2];
      vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
      vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
      vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
      vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);

      /* Get the small prim filter precision. */
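      /* vs_state_bits[7:11) holds the precision exponent n; OR-ing with 0x70
       * and shifting into the float exponent field (bit 23) constructs the
       * float 2^(n - 15) without any arithmetic.
       */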
      LLVMValueRef small_prim_precision = si_unpack_param(ctx, ctx->vs_state_bits, 7, 4);
      small_prim_precision =
         LLVMBuildOr(builder, small_prim_precision, LLVMConstInt(ctx->ac.i32, 0x70, 0), "");
      small_prim_precision =
         LLVMBuildShl(builder, small_prim_precision, LLVMConstInt(ctx->ac.i32, 23, 0), "");
      small_prim_precision = LLVMBuildBitCast(builder, small_prim_precision, ctx->ac.f32, "");

      /* Execute culling code. */
      struct ac_cull_options options = {};
      options.cull_front = shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE;
      options.cull_back = shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE;
      options.cull_view_xy = shader->key.opt.ngg_culling & SI_NGG_CULL_VIEW_SMALLPRIMS;
      options.cull_small_prims = options.cull_view_xy;
      options.cull_zero_area = options.cull_front || options.cull_back;
      options.cull_w = true;

      /* Tell ES threads whether their vertex survived. */
      ac_build_ifcc(&ctx->ac,
                    ac_cull_triangle(&ctx->ac, pos, ctx->ac.i1true, vp_scale, vp_translate,
                                     small_prim_precision, &options),
                    16003);
      {
         LLVMBuildStore(builder, ctx->ac.i32_1, gs_accepted);
         for (unsigned vtx = 0; vtx < 3; vtx++) {
            LLVMBuildStore(builder, ctx->ac.i8_1,
                           si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte0_accept_flag));
         }
      }
      ac_build_endif(&ctx->ac, 16003);
   }
   ac_build_endif(&ctx->ac, 16002);
   ac_build_s_barrier(&ctx->ac);

   gs_accepted = LLVMBuildLoad(builder, gs_accepted, "");

   LLVMValueRef es_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i1, "");

   /* Convert the per-vertex flag to a thread bitmask in ES threads and store it in LDS. */
   ac_build_ifcc(&ctx->ac, si_is_es_thread(ctx), 16007);
   {
      LLVMValueRef es_accepted_flag =
         LLVMBuildLoad(builder, si_build_gep_i8(ctx, es_vtxptr, lds_byte0_accept_flag), "");

      LLVMValueRef es_accepted_bool =
         LLVMBuildICmp(builder, LLVMIntNE, es_accepted_flag, ctx->ac.i8_0, "");
      LLVMValueRef es_mask = ac_get_i1_sgpr_mask(&ctx->ac, es_accepted_bool);

      LLVMBuildStore(builder, es_accepted_bool, es_accepted);

      ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntEQ, tid, ctx->ac.i32_0, ""), 16008);
      {
         LLVMBuildStore(builder, es_mask,
                        ac_build_gep0(&ctx->ac, ngg_scratch, get_wave_id_in_tg(ctx)));
      }
      ac_build_endif(&ctx->ac, 16008);
   }
   ac_build_endif(&ctx->ac, 16007);
   ac_build_s_barrier(&ctx->ac);

   /* Load the vertex masks and compute the new ES thread count. */
   LLVMValueRef es_mask[2], new_num_es_threads, kill_wave;
   load_bitmasks_2x64(ctx, ngg_scratch, tid, 0, es_mask, &new_num_es_threads);

   bool uses_instance_id = ctx->stage == MESA_SHADER_VERTEX &&
                           (sel->info.uses_instanceid ||
                            shader->key.part.vs.prolog.instance_divisor_is_one ||
                            shader->key.part.vs.prolog.instance_divisor_is_fetched);
   bool uses_tes_prim_id = ctx->stage == MESA_SHADER_TESS_EVAL &&
                           (sel->info.uses_primid || shader->key.mono.u.vs_export_prim_id);

   /* ES threads compute their prefix sum, which is the new ES thread ID.
    * Then they write the value of the old thread ID into the LDS address
    * of the new thread ID. It will be used to load input VGPRs from
    * the old thread's LDS location.
    */
   ac_build_ifcc(&ctx->ac, LLVMBuildLoad(builder, es_accepted, ""), 16009);
   {
      LLVMValueRef old_id = get_thread_id_in_tg(ctx);
      LLVMValueRef new_id = ac_prefix_bitcount_2x64(&ctx->ac, es_mask, old_id);
      LLVMValueRef new_vtx = ngg_nogs_vertex_ptr(ctx, new_id);

      LLVMBuildStore(builder, LLVMBuildTrunc(builder, new_id, ctx->ac.i8, ""),
                     si_build_gep_i8(ctx, es_vtxptr, lds_byte1_new_thread_id));

      /* Store Position.XYZW into LDS. */
      for (unsigned chan = 0; chan < 4; chan++) {
         LLVMBuildStore(
            builder, ac_to_integer(&ctx->ac, LLVMBuildLoad(builder, addrs[4 * pos_index + chan], "")),
            ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_pos_x + chan, 0)));
      }

      /* Store VertexID and InstanceID into LDS. ES threads will have to load them
       * from LDS after vertex compaction and use them instead of their own
       * system values.
       */
      if (ctx->stage == MESA_SHADER_VERTEX) {
         LLVMBuildStore(
            builder, ctx->abi.vertex_id,
            ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_vertex_id, 0)));
         if (uses_instance_id) {
            LLVMBuildStore(
               builder, ctx->abi.instance_id,
               ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_instance_id, 0)));
         }
      } else {
         assert(ctx->stage == MESA_SHADER_TESS_EVAL);
         LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_u)),
                        ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_tes_u, 0)));
         LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_v)),
                        ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_tes_v, 0)));
         LLVMBuildStore(builder, LLVMBuildTrunc(builder, ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id), ctx->ac.i8, ""),
                        si_build_gep_i8(ctx, new_vtx, lds_byte2_tes_rel_patch_id));
         if (uses_tes_prim_id) {
            LLVMBuildStore(
               builder, ac_get_arg(&ctx->ac, ctx->args.tes_patch_id),
               ac_build_gep0(&ctx->ac, new_vtx, LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0)));
         }
      }
   }
   ac_build_endif(&ctx->ac, 16009);

   /* Kill waves that have inactive threads. */
   kill_wave = LLVMBuildICmp(builder, LLVMIntULE,
                             ac_build_imax(&ctx->ac, new_num_es_threads, ngg_get_prim_cnt(ctx)),
                             LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
                                          LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), ""),
                             "");
   ac_build_ifcc(&ctx->ac, kill_wave, 19202);
   {
      /* If we are killing wave 0, send the message that there are no
       * primitives in this threadgroup.
       */
      ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), ctx->ac.i32_0, ctx->ac.i32_0);
      ac_build_s_endpgm(&ctx->ac);
   }
   ac_build_endif(&ctx->ac, 19202);
   ac_build_s_barrier(&ctx->ac);

   /* Send the final vertex and primitive counts. */
   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), new_num_es_threads,
                                 ngg_get_prim_cnt(ctx));

   /* Update thread counts in SGPRs. */
   LLVMValueRef new_gs_tg_info = ac_get_arg(&ctx->ac, ctx->gs_tg_info);
   LLVMValueRef new_merged_wave_info = ac_get_arg(&ctx->ac, ctx->merged_wave_info);

   /* This also converts the thread count from the total count to the per-wave count. */
   update_thread_counts(ctx, &new_num_es_threads, &new_gs_tg_info, 9, 12, &new_merged_wave_info, 8,
                        0);

   /* Update vertex indices in VGPR0 (same format as NGG passthrough). */
   LLVMValueRef new_vgpr0 = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");

   /* Set the null flag at the beginning (culled), and then
    * overwrite it for accepted primitives.
    */
   LLVMBuildStore(builder, LLVMConstInt(ctx->ac.i32, 1u << 31, 0), new_vgpr0);

   /* Get vertex indices after vertex compaction. */
   ac_build_ifcc(&ctx->ac, LLVMBuildTrunc(builder, gs_accepted, ctx->ac.i1, ""), 16011);
   {
      struct ac_ngg_prim prim = {};
      prim.num_vertices = 3;
      prim.isnull = ctx->ac.i1false;

      for (unsigned vtx = 0; vtx < 3; vtx++) {
         prim.index[vtx] = LLVMBuildLoad(
            builder, si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte1_new_thread_id), "");
         prim.index[vtx] = LLVMBuildZExt(builder, prim.index[vtx], ctx->ac.i32, "");
         prim.edgeflag[vtx] = ngg_get_initial_edgeflag(ctx, vtx);
      }

      /* Set the new GS input VGPR. */
      LLVMBuildStore(builder, ac_pack_prim_export(&ctx->ac, &prim), new_vgpr0);
   }
   ac_build_endif(&ctx->ac, 16011);

   if (gfx10_ngg_export_prim_early(shader))
      gfx10_ngg_build_export_prim(ctx, NULL, LLVMBuildLoad(builder, new_vgpr0, ""));

   /* Set the new ES input VGPRs. */
   LLVMValueRef es_data[4];

   for (unsigned i = 0; i < 4; i++)
      es_data[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");

   ac_build_ifcc(&ctx->ac, LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, tid, new_num_es_threads, ""),
                 16012);
   {
      LLVMValueRef tmp;

      for (unsigned i = 0; i < 2; i++) {
         tmp = LLVMBuildLoad(
            builder,
            ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_vertex_id + i, 0)),
            "");
         LLVMBuildStore(builder, tmp, es_data[i]);
      }

      if (ctx->stage == MESA_SHADER_TESS_EVAL) {
         tmp = LLVMBuildLoad(builder,
                             si_build_gep_i8(ctx, es_vtxptr, lds_byte2_tes_rel_patch_id), "");
         tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
         LLVMBuildStore(builder, tmp, es_data[2]);

         if (uses_tes_prim_id) {
            tmp = LLVMBuildLoad(builder,
                                ac_build_gep0(&ctx->ac, es_vtxptr,
                                              LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0)),
                                "");
            LLVMBuildStore(builder, tmp, es_data[3]);
         }
      }
   }
   ac_build_endif(&ctx->ac, 16012);

   /* Return values for the main function. */
   LLVMValueRef ret = ctx->return_value;
   LLVMValueRef val;

   ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_gs_tg_info, 2, "");
   ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_merged_wave_info, 3, "");
   if (ctx->stage == MESA_SHADER_TESS_EVAL)
      ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 4);

   ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers, 8 + SI_SGPR_RW_BUFFERS);
   ret = si_insert_input_ptr(ctx, ret, ctx->bindless_samplers_and_images,
                             8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
   ret = si_insert_input_ptr(ctx, ret, ctx->const_and_shader_buffers,
                             8 + SI_SGPR_CONST_AND_SHADER_BUFFERS);
   ret = si_insert_input_ptr(ctx, ret, ctx->samplers_and_images, 8 + SI_SGPR_SAMPLERS_AND_IMAGES);
   ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits, 8 + SI_SGPR_VS_STATE_BITS);

   if (ctx->stage == MESA_SHADER_VERTEX) {
      ret = si_insert_input_ptr(ctx, ret, ctx->args.base_vertex, 8 + SI_SGPR_BASE_VERTEX);
      ret = si_insert_input_ptr(ctx, ret, ctx->args.start_instance, 8 + SI_SGPR_START_INSTANCE);
      ret = si_insert_input_ptr(ctx, ret, ctx->args.draw_id, 8 + SI_SGPR_DRAWID);
      ret = si_insert_input_ptr(ctx, ret, ctx->vertex_buffers, 8 + SI_VS_NUM_USER_SGPR);

      for (unsigned i = 0; i < shader->selector->num_vbos_in_user_sgprs; i++) {
         ret = si_insert_input_v4i32(ctx, ret, ctx->vb_descriptors[i],
                                     8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST + i * 4);
      }
   } else {
      assert(ctx->stage == MESA_SHADER_TESS_EVAL);
      ret = si_insert_input_ptr(ctx, ret, ctx->tcs_offchip_layout, 8 + SI_SGPR_TES_OFFCHIP_LAYOUT);
      ret = si_insert_input_ptr(ctx, ret, ctx->tes_offchip_addr, 8 + SI_SGPR_TES_OFFCHIP_ADDR);
   }

   unsigned vgpr;
   if (ctx->stage == MESA_SHADER_VERTEX) {
      if (shader->selector->num_vbos_in_user_sgprs) {
         vgpr = 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST + shader->selector->num_vbos_in_user_sgprs * 4;
      } else {
         vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR + 1;
      }
   } else {
      vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
   }

   val = LLVMBuildLoad(builder, new_vgpr0, "");
   ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
   vgpr++; /* gs_vtx23_offset */

   ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
   ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
   vgpr++; /* gs_vtx45_offset */

   if (ctx->stage == MESA_SHADER_VERTEX) {
      val = LLVMBuildLoad(builder, es_data[0], "");
      ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++,
                                 ""); /* VGPR5 - VertexID */
      vgpr += 2;
      if (uses_instance_id) {
         val = LLVMBuildLoad(builder, es_data[1], "");
         ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++,
                                    ""); /* VGPR8 - InstanceID */
      } else {
         vgpr++;
      }
   } else {
      assert(ctx->stage == MESA_SHADER_TESS_EVAL);
      unsigned num_vgprs = uses_tes_prim_id ? 4 : 3;
      for (unsigned i = 0; i < num_vgprs; i++) {
         val = LLVMBuildLoad(builder, es_data[i], "");
         ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
      }
      if (num_vgprs == 3)
         vgpr++;
   }

   /* These two also use LDS. */
   if (sel->info.writes_edgeflag ||
       (ctx->stage == MESA_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id))
      ac_build_s_barrier(&ctx->ac);

   ctx->return_value = ret;
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as an ESGS shader.
 */
gfx10_emit_ngg_epilogue(struct ac_shader_abi * abi,unsigned max_outputs,LLVMValueRef * addrs)1199 void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, unsigned max_outputs, LLVMValueRef *addrs)
1200 {
1201    struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1202    struct si_shader_selector *sel = ctx->shader->selector;
1203    struct si_shader_info *info = &sel->info;
1204    struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
1205    LLVMBuilderRef builder = ctx->ac.builder;
1206    LLVMValueRef tmp, tmp2;
1207 
1208    assert(!ctx->shader->is_gs_copy_shader);
1209    assert(info->num_outputs <= max_outputs);
1210 
1211    LLVMValueRef vertex_ptr = NULL;
1212 
1213    if (sel->so.num_outputs || sel->info.writes_edgeflag)
1214       vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
1215 
1216    for (unsigned i = 0; i < info->num_outputs; i++) {
1217       outputs[i].semantic = info->output_semantic[i];
1218 
1219       for (unsigned j = 0; j < 4; j++) {
1220          outputs[i].vertex_stream[j] = (info->output_streams[i] >> (2 * j)) & 3;
1221 
1222          /* TODO: we may store more outputs than streamout needs,
1223           * but streamout performance isn't that important.
1224           */
1225          if (sel->so.num_outputs) {
1226             tmp = ac_build_gep0(&ctx->ac, vertex_ptr, LLVMConstInt(ctx->ac.i32, 4 * i + j, false));
1227             tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
1228             tmp2 = ac_to_integer(&ctx->ac, tmp2);
1229             LLVMBuildStore(builder, tmp2, tmp);
1230          }
1231       }
1232 
1233       /* Store the edge flag in the last dword of the vertex (after any streamout outputs). */
1234       if (info->output_semantic[i] == VARYING_SLOT_EDGE && sel->info.writes_edgeflag) {
1235          LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
1236          /* The output is a float, but the hw expects a 1-bit integer. */
1237          edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->ac.i32, "");
1238          edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->ac.i32_1);
1239 
1240          tmp = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
1241          tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
1242          LLVMBuildStore(builder, edgeflag, tmp);
1243       }
1244    }
1245 
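        /* Nothing below needs GS threads or LDS when there is no streamout, no
         * edge flags, no query-buffer update and no primitive-ID export; in that
         * case the ES-thread "if" opened by the merged-shader prologue is left
         * unterminated here and simply wraps the vertex exports at the end.
         */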
1246    bool unterminated_es_if_block =
1247       !sel->so.num_outputs && !sel->info.writes_edgeflag &&
1248       !ctx->screen->use_ngg_streamout && /* no query buffer */
1249       (ctx->stage != MESA_SHADER_VERTEX || !ctx->shader->key.mono.u.vs_export_prim_id);
1250 
1251    if (!unterminated_es_if_block)
1252       ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
1253 
1254    LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
1255    LLVMValueRef is_es_thread = si_is_es_thread(ctx);
1256    LLVMValueRef vtxindex[3];
1257 
1258    if (ctx->shader->key.opt.ngg_culling) {
1259       vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 9);
1260       vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 10, 9);
1261       vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 20, 9);
1262    } else {
1263       vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
1264       vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
1265       vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
1266    }
1267 
1268    /* Determine the number of vertices per primitive. */
1269    unsigned num_vertices;
1270    LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);
1271 
1272    /* Streamout */
1273    LLVMValueRef emitted_prims = NULL;
1274 
1275    if (sel->so.num_outputs) {
1276       assert(!unterminated_es_if_block);
1277 
1278       struct ngg_streamout nggso = {};
1279       nggso.num_vertices = num_vertices_val;
1280       nggso.prim_enable[0] = is_gs_thread;
1281 
1282       for (unsigned i = 0; i < num_vertices; ++i)
1283          nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
1284 
1285       build_streamout(ctx, &nggso);
1286       emitted_prims = nggso.emit[0];
1287    }
1288 
1289    LLVMValueRef user_edgeflags[3] = {};
1290 
1291    if (sel->info.writes_edgeflag) {
1292       assert(!unterminated_es_if_block);
1293 
1294       /* Streamout already inserted the barrier, so don't insert it again. */
1295       if (!sel->so.num_outputs)
1296          ac_build_s_barrier(&ctx->ac);
1297 
1298       ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
1299       /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
1300       for (unsigned i = 0; i < num_vertices; i++) {
1301          tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
1302          tmp2 = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
1303          tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
1304          tmp = LLVMBuildLoad(builder, tmp, "");
1305          tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1306 
1307          user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i1, "");
1308          LLVMBuildStore(builder, tmp, user_edgeflags[i]);
1309       }
1310       ac_build_endif(&ctx->ac, 5400);
1311    }
1312 
1313    /* Copy Primitive IDs from GS threads to the LDS address corresponding
1314     * to the ES thread of the provoking vertex.
1315     */
1316    if (ctx->stage == MESA_SHADER_VERTEX && ctx->shader->key.mono.u.vs_export_prim_id) {
1317       assert(!unterminated_es_if_block);
1318 
1319       /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
1320       if (sel->so.num_outputs || sel->info.writes_edgeflag)
1321          ac_build_s_barrier(&ctx->ac);
1322 
1323       ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
1324       /* Extract the PROVOKING_VTX_INDEX field. */
1325       LLVMValueRef provoking_vtx_in_prim = si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);
1326 
1327       /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
1328       LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
1329       LLVMValueRef provoking_vtx_index =
1330          LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
1331       LLVMValueRef vertex_ptr = ngg_nogs_vertex_ptr(ctx, provoking_vtx_index);
1332 
1333       LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
1334                      ac_build_gep0(&ctx->ac, vertex_ptr, ctx->ac.i32_0));
1335       ac_build_endif(&ctx->ac, 5400);
1336    }
1337 
1338    /* Update query buffer */
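        /* Lane 0 of wave 0 adds the generated-primitive count at buffer offset
         * 16 (stream[0].generated_primitives); with streamout, lane 1 adds the
         * emitted-primitive count at offset 24, both the value and the offset
         * being placed into lane 1 via writelane.
         */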
1339    if (ctx->screen->use_ngg_streamout && !info->base.vs.blit_sgprs_amd) {
1340       assert(!unterminated_es_if_block);
1341 
1342       tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
1343       tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1344       ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
1345       tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
1346       ac_build_ifcc(&ctx->ac, tmp, 5030);
1347       tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
1348                           sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
1349       ac_build_ifcc(&ctx->ac, tmp, 5031);
1350       {
1351          LLVMValueRef args[] = {
1352             ngg_get_prim_cnt(ctx),
1353             ngg_get_query_buf(ctx),
1354             LLVMConstInt(ctx->ac.i32, 16, false), /* offset of stream[0].generated_primitives */
1355             ctx->ac.i32_0,                        /* soffset */
1356             ctx->ac.i32_0,                        /* cachepolicy */
1357          };
1358 
1359          if (sel->so.num_outputs) {
1360             args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->ac.i32_1);
1361             args[2] = ac_build_writelane(&ctx->ac, args[2], LLVMConstInt(ctx->ac.i32, 24, false),
1362                                          ctx->ac.i32_1);
1363          }
1364 
1365          /* TODO: should this be 64-bit atomics? */
1366          ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32", ctx->ac.i32, args, 5,
1367                             0);
1368       }
1369       ac_build_endif(&ctx->ac, 5031);
1370       ac_build_endif(&ctx->ac, 5030);
1371       ac_build_endif(&ctx->ac, 5029);
1372    }
1373 
1374    /* Build the primitive export. */
1375    if (!gfx10_ngg_export_prim_early(ctx->shader)) {
1376       assert(!unterminated_es_if_block);
1377       gfx10_ngg_build_export_prim(ctx, user_edgeflags, NULL);
1378    }
1379 
1380    /* Export per-vertex data (positions and parameters). */
1381    if (!unterminated_es_if_block)
1382       ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
1383    {
1384       unsigned i;
1385 
1386       /* Unconditionally (re-)load the values for proper SSA form. */
1387       for (i = 0; i < info->num_outputs; i++) {
1388          /* If the NGG cull shader part computed the position, don't
1389           * use the position from the current shader part. Instead,
1390           * load it from LDS.
1391           */
1392          if (info->output_semantic[i] == VARYING_SLOT_POS &&
1393              ctx->shader->key.opt.ngg_culling) {
1394             vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
1395 
1396             for (unsigned j = 0; j < 4; j++) {
1397                tmp = LLVMConstInt(ctx->ac.i32, lds_pos_x + j, 0);
1398                tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
1399                tmp = LLVMBuildLoad(builder, tmp, "");
1400                outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
1401             }
1402          } else {
1403             for (unsigned j = 0; j < 4; j++) {
1404                outputs[i].values[j] = LLVMBuildLoad(builder, addrs[4 * i + j], "");
1405             }
1406          }
1407       }
1408 
1409       if (ctx->shader->key.mono.u.vs_export_prim_id) {
1410          outputs[i].semantic = VARYING_SLOT_PRIMITIVE_ID;
1411 
1412          if (ctx->stage == MESA_SHADER_VERTEX) {
1413             /* Wait for GS stores to finish. */
1414             ac_build_s_barrier(&ctx->ac);
1415 
1416             tmp = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
1417             tmp = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
1418             outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
1419          } else {
1420             assert(ctx->stage == MESA_SHADER_TESS_EVAL);
1421             outputs[i].values[0] = si_get_primitive_id(ctx, 0);
1422          }
1423 
1424          outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
1425          for (unsigned j = 1; j < 4; j++)
1426             outputs[i].values[j] = LLVMGetUndef(ctx->ac.f32);
1427 
1428          memset(outputs[i].vertex_stream, 0, sizeof(outputs[i].vertex_stream));
1429          i++;
1430       }
1431 
1432       si_llvm_build_vs_exports(ctx, outputs, i);
1433    }
1434    ac_build_endif(&ctx->ac, 6002);
1435 }
1436 
1437 static LLVMValueRef ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
1438 {
1439    const struct si_shader_selector *sel = ctx->shader->selector;
1440    const struct si_shader_info *info = &sel->info;
1441 
1442    LLVMTypeRef elements[2] = {
1443       LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
1444       LLVMArrayType(ctx->ac.i8, 4),
1445    };
1446    LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
1447    type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
1448    return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
1449 }
1450 
1451 /**
1452  * Return a pointer to the LDS storage reserved for the N'th vertex, where N
1453  * is in emit order; that is:
1454  * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
1455  * - during vertex emit, i.e. while the API GS shader invocation is running,
1456  *   N = threadidx * gs.vertices_out + emitidx
1457  *
1458  * Goals of the LDS memory layout:
1459  * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
1460  *    in uniform control flow
1461  * 2. Eliminate bank conflicts on read for export if, additionally, there is no
1462  *    culling
1463  * 3. Agnostic to the number of waves (since we don't know it before compiling)
1464  * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
1465  * 5. Avoid wasting memory.
1466  *
1467  * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
1468  * layout, elimination of bank conflicts requires that each vertex occupy an
1469  * odd number of dwords. We use the additional dword to store the output stream
1470  * index as well as a flag to indicate whether this vertex ends a primitive
1471  * for rasterization.
1472  *
1473  * Swizzling is required to satisfy points 1 and 2 simultaneously.
1474  *
1475  * Vertices are stored in export order (gsthread * gs.vertices_out + emitidx).
1476  * Indices are swizzled in groups of 32, which ensures point 1 without
1477  * disturbing point 2.
1478  *
1479  * \return an LDS pointer to type {[N x i32], [4 x i8]}
1480  */
1481 static LLVMValueRef ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
1482 {
1483    struct si_shader_selector *sel = ctx->shader->selector;
1484    LLVMBuilderRef builder = ctx->ac.builder;
1485    LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
1486 
1487    /* gs.vertices_out = 2^(write_stride_2exp) * some odd number */
1488    unsigned write_stride_2exp = ffs(sel->info.base.gs.vertices_out) - 1;
1489    if (write_stride_2exp) {
1490       LLVMValueRef row = LLVMBuildLShr(builder, vertexidx, LLVMConstInt(ctx->ac.i32, 5, false), "");
1491       LLVMValueRef swizzle = LLVMBuildAnd(
1492          builder, row, LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1, false), "");
1493       vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
1494    }
1495 
1496    return ac_build_gep0(&ctx->ac, storage, vertexidx);
1497 }
1498 
1499 static LLVMValueRef ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
1500                                            LLVMValueRef emitidx)
1501 {
1502    struct si_shader_selector *sel = ctx->shader->selector;
1503    LLVMBuilderRef builder = ctx->ac.builder;
1504    LLVMValueRef tmp;
1505 
1506    tmp = LLVMConstInt(ctx->ac.i32, sel->info.base.gs.vertices_out, false);
1507    tmp = LLVMBuildMul(builder, tmp, gsthread, "");
1508    const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
1509    return ngg_gs_vertex_ptr(ctx, vertexidx);
1510 }
1511 
1512 static LLVMValueRef ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx,
1513                                                LLVMValueRef vertexptr, unsigned out_idx)
1514 {
1515    LLVMValueRef gep_idx[3] = {
1516       ctx->ac.i32_0, /* implied C-style array */
1517       ctx->ac.i32_0, /* first struct entry */
1518       LLVMConstInt(ctx->ac.i32, out_idx, false),
1519    };
1520    return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
1521 }
1522 
1523 static LLVMValueRef ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx,
1524                                                  LLVMValueRef vertexptr, unsigned stream)
1525 {
1526    LLVMValueRef gep_idx[3] = {
1527       ctx->ac.i32_0, /* implied C-style array */
1528       ctx->ac.i32_1, /* second struct entry */
1529       LLVMConstInt(ctx->ac.i32, stream, false),
1530    };
1531    return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
1532 }
1533 
1534 void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx, unsigned stream, LLVMValueRef *addrs)
1535 {
1536    const struct si_shader_selector *sel = ctx->shader->selector;
1537    const struct si_shader_info *info = &sel->info;
1538    LLVMBuilderRef builder = ctx->ac.builder;
1539    LLVMValueRef tmp;
1540    const LLVMValueRef vertexidx = LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
1541 
1542    /* If this thread has already emitted the declared maximum number of
1543     * vertices, skip the write: excessive vertex emissions are not
1544     * supposed to have any effect.
1545     */
1546    const LLVMValueRef can_emit =
1547       LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
1548                     LLVMConstInt(ctx->ac.i32, sel->info.base.gs.vertices_out, false), "");
1549 
1550    tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
1551    tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
1552    LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
1553 
1554    ac_build_ifcc(&ctx->ac, can_emit, 9001);
1555 
1556    const LLVMValueRef vertexptr = ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
1557    unsigned out_idx = 0;
1558    for (unsigned i = 0; i < info->num_outputs; i++) {
1559       for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
1560          if (!(info->output_usagemask[i] & (1 << chan)) ||
1561              ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
1562             continue;
1563 
1564          LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
1565          out_val = ac_to_integer(&ctx->ac, out_val);
1566          LLVMBuildStore(builder, out_val, ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
1567       }
1568    }
1569    assert(out_idx * 4 == sel->gsvs_vertex_size);
1570 
1571    /* Determine and store whether this vertex completed a primitive. */
1572    const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");
1573 
1574    tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->info.base.gs.output_primitive) - 1, false);
1575    const LLVMValueRef iscompleteprim = LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");
1576 
1577    /* Since the geometry shader emits triangle strips, we need to
1578     * track which primitive is odd and swap vertex indices to get
1579     * the correct vertex order.
1580     */
1581    LLVMValueRef is_odd = ctx->ac.i1false;
1582    if (stream == 0 && u_vertices_per_prim(sel->info.base.gs.output_primitive) == 3) {
1583       tmp = LLVMBuildAnd(builder, curverts, ctx->ac.i32_1, "");
1584       is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->ac.i32_1, "");
1585    }
1586 
1587    tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
1588    LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);
1589 
1590    /* The per-vertex primitive flag encoding:
1591     *   bit 0: whether this vertex finishes a primitive
1592     *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
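          *   (e.g. a vertex that completes an odd strip triangle stores 0b11)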
1593     */
1594    tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
1595    tmp = LLVMBuildOr(
1596       builder, tmp,
1597       LLVMBuildShl(builder, LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""), ctx->ac.i8_1, ""), "");
1598    LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));
1599 
1600    tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
1601    tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
1602    LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
1603 
1604    ac_build_endif(&ctx->ac, 9001);
1605 }
1606 
1607 void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
1608 {
1609    /* Zero out the part of LDS scratch that is used to accumulate the
1610     * per-stream generated primitive count.
1611     */
1612    LLVMBuilderRef builder = ctx->ac.builder;
1613    LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
1614    LLVMValueRef tid = get_thread_id_in_tg(ctx);
1615    LLVMValueRef tmp;
1616 
1617    tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
1618    ac_build_ifcc(&ctx->ac, tmp, 5090);
1619    {
1620       LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
1621       LLVMBuildStore(builder, ctx->ac.i32_0, ptr);
1622    }
1623    ac_build_endif(&ctx->ac, 5090);
1624 
1625    ac_build_s_barrier(&ctx->ac);
1626 }
1627 
1628 void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
1629 {
1630    const struct si_shader_selector *sel = ctx->shader->selector;
1631    const struct si_shader_info *info = &sel->info;
1632    const unsigned verts_per_prim = u_vertices_per_prim(sel->info.base.gs.output_primitive);
1633    LLVMBuilderRef builder = ctx->ac.builder;
1634    LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
1635    LLVMValueRef tmp, tmp2;
1636 
1637    /* Zero out remaining (non-emitted) primitive flags.
1638     *
1639     * Note: Alternatively, we could pass the relevant gs_next_vertex to
1640     *       the emit threads via LDS. This is likely worse in the expected
1641     *       typical case where each GS thread emits the full set of
1642     *       vertices.
1643     */
1644    for (unsigned stream = 0; stream < 4; ++stream) {
1645       if (!info->num_stream_output_components[stream])
1646          continue;
1647 
1648       const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);
1649 
1650       ac_build_bgnloop(&ctx->ac, 5100);
1651 
1652       const LLVMValueRef vertexidx = LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
1653       tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
1654                           LLVMConstInt(ctx->ac.i32, sel->info.base.gs.vertices_out, false), "");
1655       ac_build_ifcc(&ctx->ac, tmp, 5101);
1656       ac_build_break(&ctx->ac);
1657       ac_build_endif(&ctx->ac, 5101);
1658 
1659       tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
1660       LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
1661 
1662       tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
1663       LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));
1664 
1665       ac_build_endloop(&ctx->ac, 5100);
1666    }
1667 
1668    /* Accumulate the generated primitive counts across the entire threadgroup. */
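        /* Each wave reduces its per-lane count, then lane 0 atomically adds
         * the wave total to the per-stream counter in LDS scratch.
         */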
1669    for (unsigned stream = 0; stream < 4; ++stream) {
1670       if (!info->num_stream_output_components[stream])
1671          continue;
1672 
1673       LLVMValueRef numprims = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
1674       numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);
1675 
1676       tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
1677       ac_build_ifcc(&ctx->ac, tmp, 5105);
1678       {
1679          LLVMBuildAtomicRMW(
1680             builder, LLVMAtomicRMWBinOpAdd,
1681             ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, LLVMConstInt(ctx->ac.i32, stream, false)),
1682             numprims, LLVMAtomicOrderingMonotonic, false);
1683       }
1684       ac_build_endif(&ctx->ac, 5105);
1685    }
1686 
1687    ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
1688 
1689    ac_build_s_barrier(&ctx->ac);
1690 
1691    const LLVMValueRef tid = get_thread_id_in_tg(ctx);
1692    LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
1693 
1694    /* Streamout */
1695    if (sel->so.num_outputs) {
1696       struct ngg_streamout nggso = {};
1697 
1698       nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);
1699 
1700       LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
1701       for (unsigned stream = 0; stream < 4; ++stream) {
1702          if (!info->num_stream_output_components[stream])
1703             continue;
1704 
1705          tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
1706          tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1707          tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1708          nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
1709       }
1710 
1711       for (unsigned i = 0; i < verts_per_prim; ++i) {
1712          tmp = LLVMBuildSub(builder, tid, LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false),
1713                             "");
1714          tmp = ngg_gs_vertex_ptr(ctx, tmp);
1715          nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
1716       }
1717 
1718       build_streamout(ctx, &nggso);
1719    }
1720 
1721    /* Write shader query data. */
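        /* The value to add is taken from LDS scratch dword "tid". Threads 0..3
         * add the per-stream generated counts (offset 32 * stream, with soffset
         * 16 pointing at stream[0].generated_primitives); with streamout,
         * threads 4..7 add the corresponding emitted counts 8 bytes further.
         */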
1722    if (ctx->screen->use_ngg_streamout) {
1723       tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
1724       tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1725       ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
1726       unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
1727       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
1728                           LLVMConstInt(ctx->ac.i32, num_query_comps, false), "");
1729       ac_build_ifcc(&ctx->ac, tmp, 5110);
1730       {
1731          LLVMValueRef offset;
1732          tmp = tid;
1733          if (sel->so.num_outputs)
1734             tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->ac.i32, 3, false), "");
1735          offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 32, false), "");
1736          if (sel->so.num_outputs) {
1737             tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->ac.i32, 2, false), "");
1738             tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 8, false), "");
1739             offset = LLVMBuildAdd(builder, offset, tmp, "");
1740          }
1741 
1742          tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
1743          LLVMValueRef args[] = {
1744             tmp,           ngg_get_query_buf(ctx),
1745             offset,        LLVMConstInt(ctx->ac.i32, 16, false), /* soffset */
1746             ctx->ac.i32_0,                                       /* cachepolicy */
1747          };
1748          ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32", ctx->ac.i32, args, 5,
1749                             0);
1750       }
1751       ac_build_endif(&ctx->ac, 5110);
1752       ac_build_endif(&ctx->ac, 5109);
1753    }
1754 
1755    /* Determine vertex liveness. */
1756    LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");
1757 
1758    tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1759    ac_build_ifcc(&ctx->ac, tmp, 5120);
1760    {
1761       for (unsigned i = 0; i < verts_per_prim; ++i) {
1762          const LLVMValueRef primidx =
1763             LLVMBuildAdd(builder, tid, LLVMConstInt(ctx->ac.i32, i, false), "");
1764 
1765          if (i > 0) {
1766             tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
1767             ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
1768          }
1769 
1770          /* Load primitive liveness */
1771          tmp = ngg_gs_vertex_ptr(ctx, primidx);
1772          tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
1773          const LLVMValueRef primlive = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1774 
1775          tmp = LLVMBuildLoad(builder, vertliveptr, "");
1776          tmp = LLVMBuildOr(builder, tmp, primlive, "");
              LLVMBuildStore(builder, tmp, vertliveptr);
1777 
1778          if (i > 0)
1779             ac_build_endif(&ctx->ac, 5121 + i);
1780       }
1781    }
1782    ac_build_endif(&ctx->ac, 5120);
1783 
1784    /* Exclusive prefix-sum of vertex liveness across the threadgroup, plus the workgroup total (reduce). */
1785    LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
1786    struct ac_wg_scan vertlive_scan = {};
1787    vertlive_scan.op = nir_op_iadd;
1788    vertlive_scan.enable_reduce = true;
1789    vertlive_scan.enable_exclusive = true;
1790    vertlive_scan.src = vertlive;
1791    vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->ac.i32_0);
1792    vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
1793    vertlive_scan.numwaves = get_tgsize(ctx);
1794    vertlive_scan.maxwaves = 8;
1795 
1796    ac_build_wg_scan(&ctx->ac, &vertlive_scan);
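        /* From here on, vertlive_scan.result_exclusive is this thread's
         * compacted vertex index and vertlive_scan.result_reduce is the total
         * number of live vertices in the threadgroup.
         */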
1797 
1798    /* Skip all exports (including index exports) when possible. On at least
1799     * early gfx10 revisions this also avoids hangs.
1800     */
1801    LLVMValueRef have_exports =
1802       LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
1803    num_emit_threads = LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");
1804 
1805    /* Allocate export space. Send this message as early as possible, to
1806     * hide the latency of the SQ <-> SPI roundtrip.
1807     *
1808     * Note: We could consider compacting primitives for export as well.
1809     *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
1810     *       prim data per clock and skips null primitives at no additional
1811     *       cost. So compacting primitives can only be beneficial when
1812     *       there are 4 or more contiguous null primitives in the export
1813     *       (in the common case of single-dword prim exports).
1814     */
1815    ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), vertlive_scan.result_reduce,
1816                                  num_emit_threads);
1817 
1818    /* Set up the reverse vertex compaction permutation. We reuse stream 1
1819     * of the primitive liveness flags: each threadgroup has at most 256
1820     * threads, so a thread index fits in the flag's i8 slot. */
1821    ac_build_ifcc(&ctx->ac, vertlive, 5130);
1822    {
1823       tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
1824       tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
1825       LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
1826    }
1827    ac_build_endif(&ctx->ac, 5130);
1828 
1829    ac_build_s_barrier(&ctx->ac);
1830 
1831    /* Export primitive data */
1832    tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1833    ac_build_ifcc(&ctx->ac, tmp, 5140);
1834    {
1835       LLVMValueRef flags;
1836       struct ac_ngg_prim prim = {};
1837       prim.num_vertices = verts_per_prim;
1838 
1839       tmp = ngg_gs_vertex_ptr(ctx, tid);
1840       flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
1841       prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->ac.i1, ""), "");
1842 
1843       for (unsigned i = 0; i < verts_per_prim; ++i) {
1844          prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
1845                                       LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
1846          prim.edgeflag[i] = ctx->ac.i1false;
1847       }
1848 
1849       /* Geometry shaders output triangle strips, but NGG expects triangles. */
1850       if (verts_per_prim == 3) {
1851          LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
1852          is_odd = LLVMBuildTrunc(builder, is_odd, ctx->ac.i1, "");
1853          LLVMValueRef flatshade_first = LLVMBuildICmp(
1854             builder, LLVMIntEQ, si_unpack_param(ctx, ctx->vs_state_bits, 4, 2), ctx->ac.i32_0, "");
1855 
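             /* Odd strip triangles have reversed winding; the helper reorders
              * the three indices (how depends on which vertex is the provoking
              * one, i.e. flatshade_first) so rasterization sees the expected
              * orientation.
              */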
1856          ac_build_triangle_strip_indices_to_triangle(&ctx->ac, is_odd, flatshade_first, prim.index);
1857       }
1858 
1859       ac_build_export_prim(&ctx->ac, &prim);
1860    }
1861    ac_build_endif(&ctx->ac, 5140);
1862 
1863    /* Export position and parameter data */
1864    tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
1865    ac_build_ifcc(&ctx->ac, tmp, 5145);
1866    {
1867       struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
1868 
1869       tmp = ngg_gs_vertex_ptr(ctx, tid);
1870       tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
1871       tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
1872       const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);
1873 
1874       unsigned out_idx = 0;
1875       for (unsigned i = 0; i < info->num_outputs; i++) {
1876          outputs[i].semantic = info->output_semantic[i];
1877 
1878          for (unsigned j = 0; j < 4; j++, out_idx++) {
1879             tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
1880             tmp = LLVMBuildLoad(builder, tmp, "");
1881             outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
1882             outputs[i].vertex_stream[j] = (info->output_streams[i] >> (2 * j)) & 3;
1883          }
1884       }
1885 
1886       si_llvm_build_vs_exports(ctx, outputs, info->num_outputs);
1887    }
1888    ac_build_endif(&ctx->ac, 5145);
1889 }
1890 
1891 static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
1892                                      unsigned min_verts_per_prim, bool use_adjacency)
1893 {
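        /* Model: the first primitive consumes min_verts_per_prim vertices and,
         * with perfect reuse, each further primitive needs at least one new
         * vertex (two with adjacency, where every other vertex is an adjacency
         * vertex), so at most 1 + max_reuse primitives fit.
         */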
1894    unsigned max_reuse = max_esverts - min_verts_per_prim;
1895    if (use_adjacency)
1896       max_reuse /= 2;
1897    *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
1898 }
1899 
1900 unsigned gfx10_ngg_get_scratch_dw_size(struct si_shader *shader)
1901 {
1902    const struct si_shader_selector *sel = shader->selector;
1903 
1904    if (sel->info.stage == MESA_SHADER_GEOMETRY && sel->so.num_outputs)
1905       return 44;
1906 
1907    return 8;
1908 }
1909 
1910 /**
1911  * Determine subgroup information like maximum number of vertices and prims.
1912  *
1913  * This happens before the shader is uploaded, since LDS relocations during
1914  * upload depend on the subgroup size.
1915  */
1916 bool gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
1917 {
1918    const struct si_shader_selector *gs_sel = shader->selector;
1919    const struct si_shader_selector *es_sel =
1920       shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
1921    const gl_shader_stage gs_stage = gs_sel->info.stage;
1922    const unsigned gs_num_invocations = MAX2(gs_sel->info.base.gs.invocations, 1);
1923    const unsigned input_prim = si_get_input_prim(gs_sel);
1924    const bool use_adjacency =
1925       input_prim >= PIPE_PRIM_LINES_ADJACENCY && input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
1926    const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
1927    const unsigned min_verts_per_prim = gs_stage == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
1928 
1929    /* All sizes below are in dwords.
1930     * GE can only use 8K dwords (32KB) of LDS per workgroup.
1931     */
1932    const unsigned max_lds_size = 8 * 1024 - gfx10_ngg_get_scratch_dw_size(shader);
1933    const unsigned target_lds_size = max_lds_size;
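        /* For example, a VS or TES without streamout reserves 8 scratch dwords,
         * leaving 8184 dwords of LDS for the subgroup.
         */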
1934    unsigned esvert_lds_size = 0;
1935    unsigned gsprim_lds_size = 0;
1936 
1937    /* All these are per subgroup: */
1938    const unsigned min_esverts = gs_sel->screen->info.chip_class >= GFX10_3 ? 29 : 24;
1939    bool max_vert_out_per_gs_instance = false;
1940    unsigned max_gsprims_base = 128; /* default prim group size clamp */
1941    unsigned max_esverts_base = 128;
1942 
1943    if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST) {
1944       max_gsprims_base = 128 / 3;
1945       max_esverts_base = max_gsprims_base * 3;
1946    } else if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP) {
1947       max_gsprims_base = 126;
1948       max_esverts_base = 128;
1949    }
1950 
1951    /* Hardware has the following non-natural restrictions on the value
1952     * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
1953     * the draw:
1954     *  - at most 252 for any line input primitive type
1955     *  - at most 251 for any quad input primitive type
1956     *  - at most 251 for triangle strips with adjacency (this happens to
1957     *    be the natural limit for triangle *lists* with adjacency)
1958     */
1959    max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
1960 
1961    if (gs_stage == MESA_SHADER_GEOMETRY) {
1962       bool force_multi_cycling = false;
1963       unsigned max_out_verts_per_gsprim = gs_sel->info.base.gs.vertices_out * gs_num_invocations;
1964 
1965 retry_select_mode:
1966       if (max_out_verts_per_gsprim <= 256 && !force_multi_cycling) {
1967          if (max_out_verts_per_gsprim) {
1968             max_gsprims_base = MIN2(max_gsprims_base, 256 / max_out_verts_per_gsprim);
1969          }
1970       } else {
1971          /* Use special multi-cycling mode in which each GS
1972           * instance gets its own subgroup. Does not work with
1973           * tessellation. */
1974          max_vert_out_per_gs_instance = true;
1975          max_gsprims_base = 1;
1976          max_out_verts_per_gsprim = gs_sel->info.base.gs.vertices_out;
1977       }
1978 
1979       esvert_lds_size = es_sel->esgs_itemsize / 4;
1980       gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
1981 
1982       if (gsprim_lds_size > target_lds_size && !force_multi_cycling) {
1983          if (gs_sel->tess_turns_off_ngg || es_sel->info.stage != MESA_SHADER_TESS_EVAL) {
1984             force_multi_cycling = true;
1985             goto retry_select_mode;
1986          }
1987       }
1988    } else {
1989       /* VS and TES. */
1990       /* LDS size for passing data from ES to GS. */
1991       esvert_lds_size = ngg_nogs_vertex_size(shader);
1992    }
1993 
1994    unsigned max_gsprims = max_gsprims_base;
1995    unsigned max_esverts = max_esverts_base;
1996 
1997    if (esvert_lds_size)
1998       max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
1999    if (gsprim_lds_size)
2000       max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
2001 
2002    max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2003    clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
2004    assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2005 
2006    if (esvert_lds_size || gsprim_lds_size) {
2007       /* Now that we have a rough proportionality between esverts
2008        * and gsprims based on the primitive type, scale both of them
2009        * down simultaneously based on required LDS space.
2010        *
2011        * We could be smarter about this if we knew how much vertex
2012        * reuse to expect.
2013        */
2014       unsigned lds_total = max_esverts * esvert_lds_size + max_gsprims * gsprim_lds_size;
2015       if (lds_total > target_lds_size) {
2016          max_esverts = max_esverts * target_lds_size / lds_total;
2017          max_gsprims = max_gsprims * target_lds_size / lds_total;
2018 
2019          max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2020          clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
2021          assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2022       }
2023    }
2024 
2025    /* Round up towards full wave sizes for better ALU utilization. */
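        /* Rounding one limit up to the wave size can break the LDS bound or the
         * esverts/gsprims coupling for the other, so the loop below re-applies
         * all clamps until neither value changes.
         */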
2026    if (!max_vert_out_per_gs_instance) {
2027       const unsigned wavesize = si_get_shader_wave_size(shader);
2028       unsigned orig_max_esverts;
2029       unsigned orig_max_gsprims;
2030       do {
2031          orig_max_esverts = max_esverts;
2032          orig_max_gsprims = max_gsprims;
2033 
2034          max_esverts = align(max_esverts, wavesize);
2035          max_esverts = MIN2(max_esverts, max_esverts_base);
2036          if (esvert_lds_size)
2037             max_esverts =
2038                MIN2(max_esverts, (max_lds_size - max_gsprims * gsprim_lds_size) / esvert_lds_size);
2039          max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2040          /* Hardware restriction: minimum value of max_esverts */
2041          max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
2042 
2043          max_gsprims = align(max_gsprims, wavesize);
2044          max_gsprims = MIN2(max_gsprims, max_gsprims_base);
2045          if (gsprim_lds_size) {
2046             /* Don't count unusable vertices to the LDS size. Those are vertices above
2047              * the maximum number of vertices that can occur in the workgroup,
2048              * which is e.g. max_gsprims * 3 for triangles.
2049              */
2050             unsigned usable_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2051             max_gsprims =
2052                MIN2(max_gsprims, (max_lds_size - usable_esverts * esvert_lds_size) / gsprim_lds_size);
2053          }
2054          clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
2055          assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2056       } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
2057 
2058       /* Verify the restriction. */
2059       assert(max_esverts >= min_esverts - 1 + max_verts_per_prim);
2060    } else {
2061       /* Hardware restriction: minimum value of max_esverts */
2062       max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
2063    }
2064 
2065    unsigned max_out_vertices =
2066       max_vert_out_per_gs_instance
2067          ? gs_sel->info.base.gs.vertices_out
2068          : gs_stage == MESA_SHADER_GEOMETRY
2069               ? max_gsprims * gs_num_invocations * gs_sel->info.base.gs.vertices_out
2070               : max_esverts;
2071    assert(max_out_vertices <= 256);
2072 
2073    unsigned prim_amp_factor = 1;
2074    if (gs_stage == MESA_SHADER_GEOMETRY) {
2075       /* Number of output primitives per GS input primitive after
2076        * GS instancing. */
2077       prim_amp_factor = gs_sel->info.base.gs.vertices_out;
2078    }
2079 
2080    /* The GE only checks against the maximum number of ES verts after
2081     * allocating a full GS primitive. So we need to ensure that whenever
2082     * this check passes, there is enough space for a full primitive without
2083     * vertex reuse.
2084     */
2085    shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
2086    shader->ngg.max_gsprims = max_gsprims;
2087    shader->ngg.max_out_verts = max_out_vertices;
2088    shader->ngg.prim_amp_factor = prim_amp_factor;
2089    shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
2090 
2091    /* Don't count unusable vertices. */
2092    shader->gs_info.esgs_ring_size = MIN2(max_esverts, max_gsprims * max_verts_per_prim) *
2093                                     esvert_lds_size;
2094    shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
2095 
2096    assert(shader->ngg.hw_max_esverts >= min_esverts); /* HW limitation */
2097 
2098    /* If asserts are disabled, check the same conditions here and return false on failure. */
2099    return max_esverts >= max_verts_per_prim && max_gsprims >= 1 &&
2100           max_out_vertices <= 256 &&
2101           shader->ngg.hw_max_esverts >= min_esverts;
2102 }
2103