/*
 * Copyright 2024 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "ac_nir.h"
#include "ac_nir_helpers.h"
#include "sid.h"
#include "nir_builder.h"
#include "nir_xfb_info.h"

void
ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_def *value,
                            unsigned component, unsigned writemask)
{
   /* component store */
   if (value->num_components != 4) {
      nir_def *undef = nir_undef(b, 1, value->bit_size);

      /* add undef components before and after the value to form a vec4 */
      nir_def *comp[4];
      for (int i = 0; i < 4; i++) {
         comp[i] = (i >= component && i < component + value->num_components) ?
            nir_channel(b, value, i - component) : undef;
      }

      value = nir_vec(b, comp, 4);
      writemask <<= component;
   } else {
      /* if num_components == 4, there should be no component offset */
      assert(component == 0);
   }

   nir_store_var(b, var, value, writemask);
}

unsigned
ac_nir_map_io_location(unsigned location, uint64_t mask,
                       ac_nir_map_io_driver_location map_io)
{
   /* Unlinked shaders:
    * We are unaware of the inputs of the next stage while lowering outputs.
    * The driver needs to pass a callback to map varyings to a fixed location.
    */
   if (map_io)
      return map_io(location);

   /* Linked shaders:
    * Take advantage of knowledge of the inputs of the next stage when lowering outputs.
    * Map varyings to a prefix sum of the IO mask to save space in LDS or VRAM.
    */
   assert(mask & BITFIELD64_BIT(location));
   return util_bitcount64(mask & BITFIELD64_MASK(location));
}

/**
 * This function takes an I/O intrinsic like load/store_input,
 * and emits a sequence that calculates the full offset of that instruction,
 * including a stride to the base and component offsets.
 */
nir_def *
ac_nir_calc_io_off(nir_builder *b, nir_intrinsic_instr *intrin, nir_def *base_stride,
                   unsigned component_stride, unsigned mapped_driver_location)
{
   /* base is the driver_location, which is in slots (1 slot = 4x4 bytes) */
   nir_def *base_op = nir_imul_imm(b, base_stride, mapped_driver_location);

   /* offset should be interpreted in relation to the base,
    * so the instruction effectively reads/writes another input/output
    * when it has an offset
    */
   nir_def *offset_op = nir_imul(b, base_stride, nir_get_io_offset_src(intrin)->ssa);

   /* component is in bytes */
   unsigned const_op = nir_intrinsic_component(intrin) * component_stride;

   return nir_iadd_imm_nuw(b, nir_iadd_nuw(b, base_op, offset_op), const_op);
}
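
/* Illustrative example (hypothetical values, not used by the code above): for an
 * intrinsic with component = 2, mapped_driver_location = 3, an indirect offset
 * source "off" and component_stride = 4 bytes, the sequence built by
 * ac_nir_calc_io_off is roughly:
 *
 *    base_op   = base_stride * 3
 *    offset_op = base_stride * off
 *    result    = base_op + offset_op + 2 * 4
 *
 * i.e. a slot-sized stride for the base and the indirect part, plus a constant
 * byte offset for the component.
 */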

/* Process the given store_output intrinsic and gather its information.
 * Meant to be used for VS/TES/GS when they are the last pre-rasterization stage.
 *
 * Assumptions:
 * - We called nir_lower_io_to_temporaries on the shader
 * - 64-bit outputs are lowered
 * - no indirect indexing is present
 */
void
ac_nir_gather_prerast_store_output_info(nir_builder *b, nir_intrinsic_instr *intrin,
                                        ac_nir_prerast_out *out)
{
   assert(intrin->intrinsic == nir_intrinsic_store_output);
   assert(nir_src_is_const(intrin->src[1]) && !nir_src_as_uint(intrin->src[1]));

   const nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);
   const unsigned slot = io_sem.location;

   nir_def *store_val = intrin->src[0].ssa;
   assert(store_val->bit_size == 16 || store_val->bit_size == 32);

   nir_def **output;
   nir_alu_type *type;
   ac_nir_prerast_per_output_info *info;

   if (slot >= VARYING_SLOT_VAR0_16BIT) {
      const unsigned index = slot - VARYING_SLOT_VAR0_16BIT;

      if (io_sem.high_16bits) {
         output = out->outputs_16bit_hi[index];
         type = out->types_16bit_hi[index];
         info = &out->infos_16bit_hi[index];
      } else {
         output = out->outputs_16bit_lo[index];
         type = out->types_16bit_lo[index];
         info = &out->infos_16bit_lo[index];
      }
   } else {
      output = out->outputs[slot];
      type = out->types[slot];
      info = &out->infos[slot];
   }

   unsigned component_offset = nir_intrinsic_component(intrin);
   unsigned write_mask = nir_intrinsic_write_mask(intrin);
   nir_alu_type src_type = nir_intrinsic_src_type(intrin);
   assert(nir_alu_type_get_type_size(src_type) == store_val->bit_size);

   b->cursor = nir_before_instr(&intrin->instr);

   /* 16-bit output stored in a normal varying slot that isn't a dedicated 16-bit slot. */
   const bool non_dedicated_16bit = slot < VARYING_SLOT_VAR0_16BIT && store_val->bit_size == 16;

   u_foreach_bit (i, write_mask) {
      const unsigned stream = (io_sem.gs_streams >> (i * 2)) & 0x3;

      if (b->shader->info.stage == MESA_SHADER_GEOMETRY) {
         if (!(b->shader->info.gs.active_stream_mask & (1 << stream)))
            continue;
      }

      const unsigned c = component_offset + i;

      /* The same output component should always belong to the same stream. */
      assert(!(info->components_mask & (1 << c)) ||
             ((info->stream >> (c * 2)) & 3) == stream);

      /* Components of the same output slot may belong to different streams. */
      info->stream |= stream << (c * 2);
      info->components_mask |= BITFIELD_BIT(c);

      if (!io_sem.no_varying)
         info->as_varying_mask |= BITFIELD_BIT(c);
      if (!io_sem.no_sysval_output)
         info->as_sysval_mask |= BITFIELD_BIT(c);

      nir_def *store_component = nir_channel(b, intrin->src[0].ssa, i);

      if (non_dedicated_16bit) {
         if (io_sem.high_16bits) {
            nir_def *lo = output[c] ? nir_unpack_32_2x16_split_x(b, output[c]) : nir_imm_intN_t(b, 0, 16);
            output[c] = nir_pack_32_2x16_split(b, lo, store_component);
         } else {
            nir_def *hi = output[c] ? nir_unpack_32_2x16_split_y(b, output[c]) : nir_imm_intN_t(b, 0, 16);
            output[c] = nir_pack_32_2x16_split(b, store_component, hi);
         }
         type[c] = nir_type_uint32;
      } else {
         output[c] = store_component;
         type[c] = src_type;
      }
   }
}

static nir_intrinsic_instr *
export(nir_builder *b, nir_def *val, nir_def *row, unsigned base, unsigned flags,
       unsigned write_mask)
{
   if (row) {
      return nir_export_row_amd(b, val, row, .base = base, .flags = flags,
                                .write_mask = write_mask);
   } else {
      return nir_export_amd(b, val, .base = base, .flags = flags,
                            .write_mask = write_mask);
   }
}

void
ac_nir_export_primitive(nir_builder *b, nir_def *prim, nir_def *row)
{
   unsigned write_mask = BITFIELD_MASK(prim->num_components);

   export(b, nir_pad_vec4(b, prim), row, V_008DFC_SQ_EXP_PRIM, AC_EXP_FLAG_DONE,
          write_mask);
}

static nir_def *
get_export_output(nir_builder *b, nir_def **output)
{
   nir_def *vec[4];
   for (int i = 0; i < 4; i++) {
      if (output[i])
         vec[i] = nir_u2uN(b, output[i], 32);
      else
         vec[i] = nir_undef(b, 1, 32);
   }

   return nir_vec(b, vec, 4);
}

static nir_def *
get_pos0_output(nir_builder *b, nir_def **output)
{
   /* Some applications don't write position but expect (0, 0, 0, 1)
    * so use that value instead of undef when it isn't written.
    */
   nir_def *vec[4] = {0};

   for (int i = 0; i < 4; i++) {
      if (output[i])
         vec[i] = nir_u2u32(b, output[i]);
      else
         vec[i] = nir_imm_float(b, i == 3 ? 1.0 : 0.0);
   }

   return nir_vec(b, vec, 4);
}
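
/* Illustrative sketch (assumed caller state, not required usage): a driver
 * lowering the last pre-rasterization stage typically gathers every
 * store_output with ac_nir_gather_prerast_store_output_info() and then drives
 * the exporters below roughly like this, where gfx_level, clip_cull_mask,
 * param_offsets, no_param_export and force_vrs are caller-provided state:
 *
 *    ac_nir_prerast_out out = {0};
 *    ... gather all store_output intrinsics into "out" ...
 *    ac_nir_export_position(b, gfx_level, clip_cull_mask, no_param_export,
 *                           force_vrs, true, b->shader->info.outputs_written,
 *                           &out, NULL);
 *    if (!no_param_export)
 *       ac_nir_export_parameters(b, param_offsets,
 *                                b->shader->info.outputs_written,
 *                                b->shader->info.outputs_written_16bit, &out);
 *
 * The exact ordering and parameters depend on the driver's NGG/legacy lowering.
 */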

void
ac_nir_export_position(nir_builder *b,
                       enum amd_gfx_level gfx_level,
                       uint32_t clip_cull_mask,
                       bool no_param_export,
                       bool force_vrs,
                       bool done,
                       uint64_t outputs_written,
                       ac_nir_prerast_out *out,
                       nir_def *row)
{
   nir_intrinsic_instr *exp[4];
   unsigned exp_num = 0;
   unsigned exp_pos_offset = 0;

   if (outputs_written & VARYING_BIT_POS) {
      /* GFX10 (Navi1x) skips POS0 exports if EXEC=0 and DONE=0, causing a hang.
       * Setting valid_mask=1 prevents it and has no other effect.
       */
      const unsigned pos_flags = gfx_level == GFX10 ? AC_EXP_FLAG_VALID_MASK : 0;

      nir_def *pos = get_pos0_output(b, out->outputs[VARYING_SLOT_POS]);

      exp[exp_num] = export(b, pos, row, V_008DFC_SQ_EXP_POS + exp_num, pos_flags, 0xf);
      exp_num++;
   } else {
      exp_pos_offset++;
   }

   uint64_t mask =
      VARYING_BIT_PSIZ |
      VARYING_BIT_EDGE |
      VARYING_BIT_LAYER |
      VARYING_BIT_VIEWPORT |
      VARYING_BIT_PRIMITIVE_SHADING_RATE;

   /* Clear the output bit if the output was not written or is not needed as a sysval output. */
   if (!out->outputs[VARYING_SLOT_PSIZ][0] || !out->infos[VARYING_SLOT_PSIZ].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_PSIZ;

   if (!out->outputs[VARYING_SLOT_EDGE][0] || !out->infos[VARYING_SLOT_EDGE].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_EDGE;

   if (!out->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0] ||
       !out->infos[VARYING_SLOT_PRIMITIVE_SHADING_RATE].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_PRIMITIVE_SHADING_RATE;

   if (!out->outputs[VARYING_SLOT_LAYER][0] || !out->infos[VARYING_SLOT_LAYER].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_LAYER;

   if (!out->outputs[VARYING_SLOT_VIEWPORT][0] || !out->infos[VARYING_SLOT_VIEWPORT].as_sysval_mask)
      outputs_written &= ~VARYING_BIT_VIEWPORT;

   if ((outputs_written & mask) || force_vrs) {
      nir_def *zero = nir_imm_float(b, 0);
      nir_def *vec[4] = { zero, zero, zero, zero };
      unsigned write_mask = 0;

      if (outputs_written & VARYING_BIT_PSIZ) {
         vec[0] = out->outputs[VARYING_SLOT_PSIZ][0];
         write_mask |= BITFIELD_BIT(0);
      }

      if (outputs_written & VARYING_BIT_EDGE) {
         vec[1] = nir_umin(b, out->outputs[VARYING_SLOT_EDGE][0], nir_imm_int(b, 1));
         write_mask |= BITFIELD_BIT(1);
      }

      nir_def *rates = NULL;
      if (outputs_written & VARYING_BIT_PRIMITIVE_SHADING_RATE) {
         rates = out->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0];
      } else if (force_vrs) {
         /* If Pos.W != 1 (typical for non-GUI elements), use coarse shading. */
         nir_def *pos_w = out->outputs[VARYING_SLOT_POS][3];
         pos_w = pos_w ? nir_u2u32(b, pos_w) : nir_imm_float(b, 1.0);
         nir_def *cond = nir_fneu_imm(b, pos_w, 1);
         rates = nir_bcsel(b, cond, nir_load_force_vrs_rates_amd(b), nir_imm_int(b, 0));
      }

      if (rates) {
         vec[1] = nir_ior(b, vec[1], rates);
         write_mask |= BITFIELD_BIT(1);
      }

      if (outputs_written & VARYING_BIT_LAYER) {
         vec[2] = out->outputs[VARYING_SLOT_LAYER][0];
         write_mask |= BITFIELD_BIT(2);
      }

      if (outputs_written & VARYING_BIT_VIEWPORT) {
         if (gfx_level >= GFX9) {
            /* GFX9 has the layer in [10:0] and the viewport index in [19:16]. */
            nir_def *v = nir_ishl_imm(b, out->outputs[VARYING_SLOT_VIEWPORT][0], 16);
            vec[2] = nir_ior(b, vec[2], v);
            write_mask |= BITFIELD_BIT(2);
         } else {
            vec[3] = out->outputs[VARYING_SLOT_VIEWPORT][0];
            write_mask |= BITFIELD_BIT(3);
         }
      }

      exp[exp_num] = export(b, nir_vec(b, vec, 4), row,
                            V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset, 0, write_mask);
      exp_num++;
   }

   for (int i = 0; i < 2; i++) {
      if ((outputs_written & (VARYING_BIT_CLIP_DIST0 << i)) &&
          (clip_cull_mask & BITFIELD_RANGE(i * 4, 4))) {
         exp[exp_num] = export(
            b, get_export_output(b, out->outputs[VARYING_SLOT_CLIP_DIST0 + i]), row,
            V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset, 0,
            (clip_cull_mask >> (i * 4)) & 0xf);
         exp_num++;
      }
   }

   if (outputs_written & VARYING_BIT_CLIP_VERTEX) {
      nir_def *vtx = get_export_output(b, out->outputs[VARYING_SLOT_CLIP_VERTEX]);

      /* Clip distance for clip vertex to each user clip plane. */
      nir_def *clip_dist[8] = {0};
      u_foreach_bit (i, clip_cull_mask) {
         nir_def *ucp = nir_load_user_clip_plane(b, .ucp_id = i);
         clip_dist[i] = nir_fdot4(b, vtx, ucp);
      }

      for (int i = 0; i < 2; i++) {
         if (clip_cull_mask & BITFIELD_RANGE(i * 4, 4)) {
            exp[exp_num] = export(
               b, get_export_output(b, clip_dist + i * 4), row,
               V_008DFC_SQ_EXP_POS + exp_num + exp_pos_offset, 0,
               (clip_cull_mask >> (i * 4)) & 0xf);
            exp_num++;
         }
      }
   }

   if (!exp_num)
      return;

   nir_intrinsic_instr *final_exp = exp[exp_num - 1];

   if (done) {
      /* Specify that this is the last export */
      const unsigned final_exp_flags = nir_intrinsic_flags(final_exp);
      nir_intrinsic_set_flags(final_exp, final_exp_flags | AC_EXP_FLAG_DONE);
   }

   /* If a shader has no param exports, rasterization can start before
    * the shader finishes and thus memory stores might not finish before
    * the pixel shader starts.
    */
   if (gfx_level >= GFX10 && no_param_export && b->shader->info.writes_memory) {
      nir_cursor cursor = b->cursor;
      b->cursor = nir_before_instr(&final_exp->instr);
      nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                                nir_var_mem_ssbo | nir_var_mem_global | nir_var_image);
      b->cursor = cursor;
   }
}

void
ac_nir_export_parameters(nir_builder *b,
                         const uint8_t *param_offsets,
                         uint64_t outputs_written,
                         uint16_t outputs_written_16bit,
                         ac_nir_prerast_out *out)
{
   uint32_t exported_params = 0;

   u_foreach_bit64 (slot, outputs_written) {
      unsigned offset = param_offsets[slot];
      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      uint32_t write_mask = 0;
      for (int i = 0; i < 4; i++) {
         if (out->outputs[slot][i])
            write_mask |= (out->infos[slot].as_varying_mask & BITFIELD_BIT(i));
      }

      /* no one set this output slot, we can skip the param export */
      if (!write_mask)
         continue;

      /* Since param_offsets[] can map multiple varying slots to the same
       * param export index (that's radeonsi-specific behavior), we need to
       * do this so as not to emit duplicated exports.
       */
      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_export_amd(
         b, get_export_output(b, out->outputs[slot]),
         .base = V_008DFC_SQ_EXP_PARAM + offset,
         .write_mask = write_mask);
      exported_params |= BITFIELD_BIT(offset);
   }

   u_foreach_bit (slot, outputs_written_16bit) {
      unsigned offset = param_offsets[VARYING_SLOT_VAR0_16BIT + slot];
      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      uint32_t write_mask = 0;
      for (int i = 0; i < 4; i++) {
         if (out->outputs_16bit_lo[slot][i] || out->outputs_16bit_hi[slot][i])
            write_mask |= BITFIELD_BIT(i);
      }

      /* no one set this output slot, we can skip the param export */
      if (!write_mask)
         continue;

      /* Since param_offsets[] can map multiple varying slots to the same
       * param export index (that's radeonsi-specific behavior), we need to
       * do this so as not to emit duplicated exports.
       */
      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_def *vec[4];
      nir_def *undef = nir_undef(b, 1, 16);
      for (int i = 0; i < 4; i++) {
         nir_def *lo = out->outputs_16bit_lo[slot][i] ? out->outputs_16bit_lo[slot][i] : undef;
         nir_def *hi = out->outputs_16bit_hi[slot][i] ? out->outputs_16bit_hi[slot][i] : undef;
         vec[i] = nir_pack_32_2x16_split(b, lo, hi);
      }

      nir_export_amd(
         b, nir_vec(b, vec, 4),
         .base = V_008DFC_SQ_EXP_PARAM + offset,
         .write_mask = write_mask);
      exported_params |= BITFIELD_BIT(offset);
   }
}
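
/* Note on the 16-bit path above: a pair of 16-bit outputs that share a slot is
 * packed into a single 32-bit param channel with
 * nir_pack_32_2x16_split(b, lo, hi), i.e. the low-half varying occupies bits
 * [15:0] and the high-half varying bits [31:16] of the exported component.
 */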

void
ac_nir_store_parameters_to_attr_ring(nir_builder *b,
                                     const uint8_t *param_offsets,
                                     const uint64_t outputs_written,
                                     const uint16_t outputs_written_16bit,
                                     ac_nir_prerast_out *out,
                                     nir_def *export_tid, nir_def *num_export_threads)
{
   nir_def *attr_rsrc = nir_load_ring_attr_amd(b);

   /* We should always store full vec4s in groups of 8 lanes for the best performance even if
    * some of them are garbage or have unused components, so align the number of export threads
    * to 8.
    */
   num_export_threads = nir_iand_imm(b, nir_iadd_imm(b, num_export_threads, 7), ~7);

   if (!export_tid)
      nir_push_if(b, nir_is_subgroup_invocation_lt_amd(b, num_export_threads));
   else
      nir_push_if(b, nir_ult(b, export_tid, num_export_threads));

   nir_def *attr_offset = nir_load_ring_attr_offset_amd(b);
   nir_def *vindex = nir_load_local_invocation_index(b);
   nir_def *voffset = nir_imm_int(b, 0);
   nir_def *undef = nir_undef(b, 1, 32);

   uint32_t exported_params = 0;

   u_foreach_bit64 (slot, outputs_written) {
      const unsigned offset = param_offsets[slot];

      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      if (!out->infos[slot].as_varying_mask)
         continue;

      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_def *comp[4];
      for (unsigned j = 0; j < 4; j++) {
         comp[j] = out->outputs[slot][j] ? out->outputs[slot][j] : undef;
      }

      nir_store_buffer_amd(b, nir_vec(b, comp, 4), attr_rsrc, voffset, attr_offset, vindex,
                           .base = offset * 16,
                           .memory_modes = nir_var_shader_out,
                           .access = ACCESS_COHERENT | ACCESS_IS_SWIZZLED_AMD);

      exported_params |= BITFIELD_BIT(offset);
   }

   u_foreach_bit (i, outputs_written_16bit) {
      const unsigned offset = param_offsets[VARYING_SLOT_VAR0_16BIT + i];

      if (offset > AC_EXP_PARAM_OFFSET_31)
         continue;

      if (!out->infos_16bit_lo[i].as_varying_mask &&
          !out->infos_16bit_hi[i].as_varying_mask)
         continue;

      if (exported_params & BITFIELD_BIT(offset))
         continue;

      nir_def *comp[4];
      for (unsigned j = 0; j < 4; j++) {
         nir_def *lo = out->outputs_16bit_lo[i][j] ? out->outputs_16bit_lo[i][j] : undef;
         nir_def *hi = out->outputs_16bit_hi[i][j] ? out->outputs_16bit_hi[i][j] : undef;
         comp[j] = nir_pack_32_2x16_split(b, lo, hi);
      }

      nir_store_buffer_amd(b, nir_vec(b, comp, 4), attr_rsrc, voffset, attr_offset, vindex,
                           .base = offset * 16,
                           .memory_modes = nir_var_shader_out,
                           .access = ACCESS_COHERENT | ACCESS_IS_SWIZZLED_AMD);

      exported_params |= BITFIELD_BIT(offset);
   }

   nir_pop_if(b, NULL);
}

static int
sort_xfb(const void *_a, const void *_b)
{
   const nir_xfb_output_info *a = (const nir_xfb_output_info *)_a;
   const nir_xfb_output_info *b = (const nir_xfb_output_info *)_b;

   if (a->buffer != b->buffer)
      return a->buffer > b->buffer ? 1 : -1;

   assert(a->offset != b->offset);
   return a->offset > b->offset ? 1 : -1;
}
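
/* Example of the resulting order (illustrative values): outputs declared as
 * (buffer 1, offset 0), (buffer 0, offset 8), (buffer 0, offset 0) are sorted
 * to (0, 0), (0, 8), (1, 0), so components that are adjacent in memory are
 * visited back to back and can be merged into wider stores below.
 */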

/* Return XFB info sorted by buffer and offset, so that we can generate vec4
 * stores by iterating over outputs only once.
 */
nir_xfb_info *
ac_nir_get_sorted_xfb_info(const nir_shader *nir)
{
   if (!nir->xfb_info)
      return NULL;

   unsigned xfb_info_size = nir_xfb_info_size(nir->xfb_info->output_count);
   nir_xfb_info *info = rzalloc_size(nir, xfb_info_size);

   memcpy(info, nir->xfb_info, xfb_info_size);
   qsort(info->outputs, info->output_count, sizeof(info->outputs[0]), sort_xfb);
   return info;
}

static nir_def **
get_output_and_type(ac_nir_prerast_out *out, unsigned slot, bool high_16bits,
                    nir_alu_type **types)
{
   nir_def **data;
   nir_alu_type *type;

   /* Only VARYING_SLOT_VARn_16BIT slots need the output type to convert a 16-bit
    * output to 32 bits. Vulkan does not allow streamout of outputs smaller than 32 bits.
    */
   if (slot < VARYING_SLOT_VAR0_16BIT) {
      data = out->outputs[slot];
      type = NULL;
   } else {
      unsigned index = slot - VARYING_SLOT_VAR0_16BIT;

      if (high_16bits) {
         data = out->outputs_16bit_hi[index];
         type = out->types_16bit_hi[index];
      } else {
         data = out->outputs_16bit_lo[index];
         type = out->types_16bit_lo[index];
      }
   }

   *types = type;
   return data;
}

void
ac_nir_emit_legacy_streamout(nir_builder *b, unsigned stream, nir_xfb_info *info,
                             ac_nir_prerast_out *out)
{
   nir_def *so_vtx_count = nir_ubfe_imm(b, nir_load_streamout_config_amd(b), 16, 7);
   nir_def *tid = nir_load_subgroup_invocation(b);

   nir_push_if(b, nir_ilt(b, tid, so_vtx_count));

   nir_def *so_write_index = nir_load_streamout_write_index_amd(b);

   nir_def *so_buffers[NIR_MAX_XFB_BUFFERS];
   nir_def *so_write_offset[NIR_MAX_XFB_BUFFERS];
   u_foreach_bit (i, info->buffers_written) {
      so_buffers[i] = nir_load_streamout_buffer_amd(b, i);

      unsigned stride = info->buffers[i].stride;
      nir_def *offset = nir_load_streamout_offset_amd(b, i);
      offset = nir_iadd(b, nir_imul_imm(b, nir_iadd(b, so_write_index, tid), stride),
                        nir_imul_imm(b, offset, 4));
      so_write_offset[i] = offset;
   }

   nir_def *zero = nir_imm_int(b, 0);
   unsigned num_values = 0, store_offset = 0, store_buffer_index = 0;
   nir_def *values[4];

   for (unsigned i = 0; i < info->output_count; i++) {
      const nir_xfb_output_info *output = info->outputs + i;
      if (stream != info->buffer_to_stream[output->buffer])
         continue;

      nir_alu_type *output_type;
      nir_def **output_data =
         get_output_and_type(out, output->location, output->high_16bits, &output_type);

      u_foreach_bit (out_comp, output->component_mask) {
         if (!output_data[out_comp])
            continue;

         nir_def *data = output_data[out_comp];

         if (data->bit_size < 32) {
            /* Convert the 16-bit output to 32 bits. */
            assert(output_type);

            nir_alu_type base_type = nir_alu_type_get_base_type(output_type[out_comp]);
            data = nir_convert_to_bit_size(b, data, base_type, 32);
         }

         assert(out_comp >= output->component_offset);
         const unsigned store_comp = out_comp - output->component_offset;
         const unsigned store_comp_offset = output->offset + store_comp * 4;
         const bool has_hole = store_offset + num_values * 4 != store_comp_offset;

         /* Flush the gathered components to memory as a vec4 store or less if there is a hole. */
         if (num_values && (num_values == 4 || store_buffer_index != output->buffer || has_hole)) {
            nir_store_buffer_amd(b, nir_vec(b, values, num_values),
                                 so_buffers[store_buffer_index],
                                 so_write_offset[store_buffer_index], zero, zero,
                                 .base = store_offset,
                                 .access = ACCESS_NON_TEMPORAL);
            num_values = 0;
         }

         /* Initialize the buffer index and offset if we are beginning a new vec4 store. */
         if (num_values == 0) {
            store_buffer_index = output->buffer;
            store_offset = store_comp_offset;
         }

         values[num_values++] = data;
      }
   }

   if (num_values) {
      /* Flush the remaining components to memory (as an up to vec4 store) */
      nir_store_buffer_amd(b, nir_vec(b, values, num_values), so_buffers[store_buffer_index],
                           so_write_offset[store_buffer_index], zero, zero,
                           .base = store_offset,
                           .access = ACCESS_NON_TEMPORAL);
   }

   nir_pop_if(b, NULL);
}
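
/* Worked example for the coalescing above (illustrative offsets): if stream 0
 * writes components at buffer 0, byte offsets 0, 4, 8 and 12, they accumulate
 * in values[] and are flushed as a single vec4 nir_store_buffer_amd with
 * .base = 0. If offset 8 is missing (a hole), the first two components are
 * flushed as a vec2 store at .base = 0 and a new store is started at .base = 12.
 */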

static nir_def *
ac_nir_accum_ior(nir_builder *b, nir_def *accum_result, nir_def *new_term)
{
   return accum_result ? nir_ior(b, accum_result, new_term) : new_term;
}

bool
ac_nir_gs_shader_query(nir_builder *b,
                       bool has_gen_prim_query,
                       bool has_gs_invocations_query,
                       bool has_gs_primitives_query,
                       unsigned num_vertices_per_primitive,
                       unsigned wave_size,
                       nir_def *vertex_count[4],
                       nir_def *primitive_count[4])
{
   nir_def *pipeline_query_enabled = NULL;
   nir_def *prim_gen_query_enabled = NULL;
   nir_def *any_query_enabled = NULL;

   if (has_gen_prim_query) {
      prim_gen_query_enabled = nir_load_prim_gen_query_enabled_amd(b);
      any_query_enabled = ac_nir_accum_ior(b, any_query_enabled, prim_gen_query_enabled);
   }

   if (has_gs_invocations_query || has_gs_primitives_query) {
      pipeline_query_enabled = nir_load_pipeline_stat_query_enabled_amd(b);
      any_query_enabled = ac_nir_accum_ior(b, any_query_enabled, pipeline_query_enabled);
   }

   if (!any_query_enabled) {
      /* has no query */
      return false;
   }

   nir_if *if_shader_query = nir_push_if(b, any_query_enabled);

   nir_def *active_threads_mask = nir_ballot(b, 1, wave_size, nir_imm_true(b));
   nir_def *num_active_threads = nir_bit_count(b, active_threads_mask);

   /* Calculate the "real" number of emitted primitives from the emitted GS vertices and primitives.
    * GS emits points, line strips or triangle strips.
    * Real primitives are points, lines or triangles.
    */
   nir_def *num_prims_in_wave[4] = {0};
   u_foreach_bit (i, b->shader->info.gs.active_stream_mask) {
      assert(vertex_count[i] && primitive_count[i]);

      nir_scalar vtx_cnt = nir_get_scalar(vertex_count[i], 0);
      nir_scalar prm_cnt = nir_get_scalar(primitive_count[i], 0);

      if (nir_scalar_is_const(vtx_cnt) && nir_scalar_is_const(prm_cnt)) {
         unsigned gs_vtx_cnt = nir_scalar_as_uint(vtx_cnt);
         unsigned gs_prm_cnt = nir_scalar_as_uint(prm_cnt);
         unsigned total_prm_cnt = gs_vtx_cnt - gs_prm_cnt * (num_vertices_per_primitive - 1u);
         if (total_prm_cnt == 0)
            continue;

         num_prims_in_wave[i] = nir_imul_imm(b, num_active_threads, total_prm_cnt);
      } else {
         nir_def *gs_vtx_cnt = vtx_cnt.def;
         nir_def *gs_prm_cnt = prm_cnt.def;
         if (num_vertices_per_primitive > 1)
            gs_prm_cnt = nir_iadd(b, nir_imul_imm(b, gs_prm_cnt, -1u * (num_vertices_per_primitive - 1)),
                                  gs_vtx_cnt);
         num_prims_in_wave[i] = nir_reduce(b, gs_prm_cnt, .reduction_op = nir_op_iadd);
      }
   }

   /* Store the counts to the query result using atomic adds. */
   nir_if *if_first_lane = nir_push_if(b, nir_elect(b, 1));
   {
      if (has_gs_invocations_query || has_gs_primitives_query) {
         nir_if *if_pipeline_query = nir_push_if(b, pipeline_query_enabled);
         {
            nir_def *count = NULL;

            /* Add the number of primitives from all streams into the same counter. */
            for (int i = 0; i < 4; i++) {
               if (num_prims_in_wave[i]) {
                  if (count)
                     count = nir_iadd(b, count, num_prims_in_wave[i]);
                  else
                     count = num_prims_in_wave[i];
               }
            }

            if (has_gs_primitives_query && count)
               nir_atomic_add_gs_emit_prim_count_amd(b, count);

            if (has_gs_invocations_query)
               nir_atomic_add_shader_invocation_count_amd(b, num_active_threads);
         }
         nir_pop_if(b, if_pipeline_query);
      }

      if (has_gen_prim_query) {
         nir_if *if_prim_gen_query = nir_push_if(b, prim_gen_query_enabled);
         {
            /* Add to the counter for this stream. */
            for (int i = 0; i < 4; i++) {
               if (num_prims_in_wave[i])
                  nir_atomic_add_gen_prim_count_amd(b, num_prims_in_wave[i], .stream_id = i);
            }
         }
         nir_pop_if(b, if_prim_gen_query);
      }
   }
   nir_pop_if(b, if_first_lane);

   nir_pop_if(b, if_shader_query);

   return true;
}

nir_def *
ac_nir_pack_ngg_prim_exp_arg(nir_builder *b, unsigned num_vertices_per_primitives,
                             nir_def *vertex_indices[3], nir_def *is_null_prim,
                             enum amd_gfx_level gfx_level)
{
   nir_def *arg = nir_load_initial_edgeflags_amd(b);

   /* Pack the vertex indices into the primitive export argument:
    * each index occupies 10 bits before GFX12 and 9 bits on GFX12+.
    */
   for (unsigned i = 0; i < num_vertices_per_primitives; ++i) {
      assert(vertex_indices[i]);
      arg = nir_ior(b, arg, nir_ishl_imm(b, vertex_indices[i],
                                         (gfx_level >= GFX12 ? 9u : 10u) * i));
   }

   if (is_null_prim) {
      if (is_null_prim->bit_size == 1)
         is_null_prim = nir_b2i32(b, is_null_prim);
      assert(is_null_prim->bit_size == 32);

      /* Bit 31 marks the primitive as a null primitive. */
      arg = nir_ior(b, arg, nir_ishl_imm(b, is_null_prim, 31u));
   }

   return arg;
}