/external/mesa3d/src/gallium/drivers/etnaviv/
D | etnaviv_nir.c |
    49  intr->dest.ssa.bit_size = 32;  in etna_lower_io()
    53  nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));  in etna_lower_io() local
    55  nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;  in etna_lower_io()
    57  nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,  in etna_lower_io()
    58     nir_src_for_ssa(ssa),  in etna_lower_io()
    59     ssa->parent_instr);  in etna_lower_io()
    74  nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);  in etna_lower_io() local
    75  nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);  in etna_lower_io()
    78  nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));  in etna_lower_io()
    91  nir_intrinsic_set_align(load_ubo, intr->dest.ssa.bit_size / 8, 0);  in etna_lower_io()
    [all …]
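These hits show NIR's standard in-place lowering idiom: position a nir_builder right after the instruction, build the replacement value, then redirect later uses of the old def to it. A minimal sketch of that pattern — a hypothetical helper, not the etnaviv pass itself, using only the builder calls visible above and assuming a 32-bit scalar result:

    #include "nir.h"
    #include "nir_builder.h"

    /* Make every later consumer of `intr`'s result see `result != 0`
     * instead -- the same shape as lines 53-59 above. */
    static void
    lower_result_to_bool(nir_builder *b, nir_intrinsic_instr *intr)
    {
       b->cursor = nir_after_instr(&intr->instr);

       nir_ssa_def *ssa = nir_ine(b, &intr->dest.ssa, nir_imm_int(b, 0));

       /* Rewrite only the uses *after* the comparison, so the
        * comparison itself keeps reading the original result. */
       nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                      nir_src_for_ssa(ssa),
                                      ssa->parent_instr);
    }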
|
D | etnaviv_compiler_nir.h |
    86  return src->is_ssa ? src->ssa->index : (src->reg.reg->index + impl->ssa_alloc);  in src_index()
    93  return dest->is_ssa ? dest->ssa.index : (dest->reg.reg->index + impl->ssa_alloc);  in dest_index()
   109  if (is_vec && alu->src[i].src.ssa != &dest->ssa)  in update_swiz_mask()
   128  bool can_bypass_src = !list_length(&dest->ssa.if_uses);  in real_dest()
   129  nir_instr *p_instr = dest->ssa.parent_instr;  in real_dest()
   135  nir_foreach_use(use_src, &dest->ssa) {  in real_dest()
   162  assert(list_length(&dest->ssa.if_uses) == 0);  in real_dest()
   163  nir_foreach_use(use_src, &dest->ssa)  in real_dest()
   169  switch (dest->ssa.parent_instr->type) {  in real_dest()
   176  if (list_length(&dest->ssa.if_uses) || list_length(&dest->ssa.uses) > 1)  in real_dest()
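src_index()/dest_index() fold SSA defs and NIR registers into one flat index space by offsetting register indices past impl->ssa_alloc, so per-value tables (liveness, register assignment) can be sized once as ssa_alloc + reg_alloc. A sketch of the same mapping under hypothetical names, assuming pre-SSA-only NIR where a nir_src may still refer to a register:

    #include "nir.h"

    /* SSA defs keep their own index; registers are shifted past the
     * ssa_alloc counter so the two ranges never collide. */
    static unsigned
    flat_src_index(const nir_function_impl *impl, const nir_src *src)
    {
       return src->is_ssa ? src->ssa->index
                          : (src->reg.reg->index + impl->ssa_alloc);
    }

    static unsigned
    flat_dest_index(const nir_function_impl *impl, const nir_dest *dest)
    {
       return dest->is_ssa ? dest->ssa.index
                           : (dest->reg.reg->index + impl->ssa_alloc);
    }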
|
/external/mesa3d/src/compiler/nir/ |
D | nir_divergence_analysis.c |
    65  if (instr->dest.dest.ssa.divergent)  in visit_alu()
    71  if (instr->src[i].src.ssa->divergent) {  in visit_alu()
    72  instr->dest.dest.ssa.divergent = true;  in visit_alu()
    86  if (instr->dest.ssa.divergent)  in visit_intrinsic()
   140  is_divergent = instr->src[0].ssa->divergent;  in visit_intrinsic()
   149  is_divergent = instr->src[0].ssa->divergent ||  in visit_intrinsic()
   150     instr->src[1].ssa->divergent;  in visit_intrinsic()
   159  is_divergent = instr->src[1].ssa->divergent;  in visit_intrinsic()
   165  is_divergent = instr->src[0].ssa->divergent;  in visit_intrinsic()
   173  is_divergent = instr->src[0].ssa->divergent ||  in visit_intrinsic()
   [all …]
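The rule visit_alu() applies is simple: an ALU result is divergent exactly when at least one source is divergent, and the analysis re-runs until no flag changes. A minimal sketch of that one propagation step (hypothetical helper; the real pass also handles intrinsics, phis, and loop-carried values):

    #include "nir.h"

    /* Returns true when the flag changed, so the caller can iterate the
     * whole shader to a fixed point. */
    static bool
    propagate_alu_divergence(nir_alu_instr *instr)
    {
       if (instr->dest.dest.ssa.divergent)
          return false;   /* already divergent, nothing left to learn */

       for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
          if (instr->src[i].src.ssa->divergent) {
             instr->dest.dest.ssa.divergent = true;
             return true;
          }
       }
       return false;
    }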
|
D | nir_lower_subgroups.c |
    37  comp = nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa);  in lower_subgroups_64bit_split_intrinsic()
    39  comp = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);  in lower_subgroups_64bit_split_intrinsic()
    57  assert(intrin->src[0].ssa->bit_size == 64);  in lower_subgroup_op_to_32bit()
    60  return nir_pack_64_2x32_split(b, &intr_x->dest.ssa, &intr_y->dest.ssa);  in lower_subgroup_op_to_32bit()
   119  assert(intrin->dest.ssa.num_components > 1);  in lower_subgroup_op_to_scalar()
   129  1, intrin->dest.ssa.bit_size, NULL);  in lower_subgroup_op_to_scalar()
   143  if (lower_to_32bit && chan_intrin->src[0].ssa->bit_size == 64) {  in lower_subgroup_op_to_scalar()
   147  reads[i] = &chan_intrin->dest.ssa;  in lower_subgroup_op_to_scalar()
   158  nir_ssa_def *value = intrin->src[0].ssa;  in lower_vote_eq_to_scalar()
   165  1, intrin->dest.ssa.bit_size, NULL);  in lower_vote_eq_to_scalar()
   [all …]
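lower_subgroup_op_to_32bit() handles a 64-bit operand by unpacking it into two 32-bit halves, running the 32-bit form of the operation on each, and packing the results back together. A sketch of that split, with the per-half lowering abstracted behind a hypothetical callback (the real pass re-creates the intrinsic for each half instead):

    #include "nir.h"
    #include "nir_builder.h"

    /* Hypothetical: emits the 32-bit variant of the subgroup op. */
    typedef nir_ssa_def *(*lower32_fn)(nir_builder *b, nir_ssa_def *half);

    static nir_ssa_def *
    lower_64bit_to_2x32(nir_builder *b, nir_ssa_def *val, lower32_fn lower32)
    {
       assert(val->bit_size == 64);

       /* Operate on the low and high 32 bits independently... */
       nir_ssa_def *x = lower32(b, nir_unpack_64_2x32_split_x(b, val));
       nir_ssa_def *y = lower32(b, nir_unpack_64_2x32_split_y(b, val));

       /* ...and reassemble the 64-bit result. */
       return nir_pack_64_2x32_split(b, x, y);
    }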
|
D | nir_opt_remove_phis.c |
    32  get_parent_mov(nir_ssa_def *ssa)  in get_parent_mov() argument
    34  if (ssa->parent_instr->type != nir_instr_type_alu)  in get_parent_mov()
    37  nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);  in get_parent_mov()
    42  matching_mov(nir_alu_instr *mov1, nir_ssa_def *ssa)  in matching_mov() argument
    47  nir_alu_instr *mov2 = get_parent_mov(ssa);  in matching_mov()
    95  if (src->src.ssa == &phi->dest.ssa)  in remove_phis_block()
    99  def = src->src.ssa;  in remove_phis_block()
   101  } else if (src->src.ssa->parent_instr->type == nir_instr_type_ssa_undef &&  in remove_phis_block()
   105  if (src->src.ssa != def && !matching_mov(mov, src->src.ssa)) {  in remove_phis_block()
   119  def = nir_ssa_undef(b, phi->dest.ssa.num_components,  in remove_phis_block()
   [all …]
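remove_phis_block() deletes phis that don't actually merge distinct values: if every incoming source is the same def (self-references allowed), the phi can be replaced by that def. A stripped-down sketch of the core test — the real pass additionally tolerates undef sources and swizzle-compatible movs:

    #include "nir.h"

    static bool
    try_remove_trivial_phi(nir_phi_instr *phi)
    {
       nir_ssa_def *def = NULL;

       nir_foreach_phi_src(src, phi) {
          /* A phi may harmlessly reference itself through a loop. */
          if (src->src.ssa == &phi->dest.ssa)
             continue;

          if (def == NULL)
             def = src->src.ssa;
          else if (src->src.ssa != def)
             return false;   /* genuinely merges two values: keep it */
       }

       if (def == NULL)
          return false;

       nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(def));
       nir_instr_remove(&phi->instr);
       return true;
    }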
|
D | nir_opt_constant_folding.c |
    62  bit_size = alu->dest.dest.ssa.bit_size;  in try_fold_alu()
    70  bit_size = alu->src[i].src.ssa->bit_size;  in try_fold_alu()
    72  nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;  in try_fold_alu()
    98  nir_eval_const_opcode(alu->op, dest, alu->dest.dest.ssa.num_components,  in try_fold_alu()
   103  nir_ssa_def *imm = nir_build_imm(b, alu->dest.dest.ssa.num_components,  in try_fold_alu()
   104     alu->dest.dest.ssa.bit_size,  in try_fold_alu()
   106  nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(imm));  in try_fold_alu()
   221  nir_ssa_def *val = nir_build_imm(b, intrin->dest.ssa.num_components,  in try_fold_intrinsic()
   222     intrin->dest.ssa.bit_size, v);  in try_fold_intrinsic()
   223  nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(val));  in try_fold_intrinsic()
   [all …]
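try_fold_alu() evaluates an ALU op whose sources are all load_const, builds the result as an immediate, and rewrites the uses. nir_eval_const_opcode() handles every opcode generically; here is a sketch of just the replace step, specialized to a scalar iadd with scalar constant sources:

    #include "nir.h"
    #include "nir_builder.h"

    static bool
    fold_const_iadd(nir_builder *b, nir_alu_instr *alu)
    {
       if (alu->op != nir_op_iadd ||
           alu->dest.dest.ssa.num_components != 1 ||
           alu->src[0].src.ssa->num_components != 1 ||
           alu->src[1].src.ssa->num_components != 1 ||
           !nir_src_is_const(alu->src[0].src) ||
           !nir_src_is_const(alu->src[1].src))
          return false;

       /* Evaluate at compile time, wrapping at the result's bit size. */
       const unsigned bit_size = alu->dest.dest.ssa.bit_size;
       uint64_t sum = (nir_src_as_uint(alu->src[0].src) +
                       nir_src_as_uint(alu->src[1].src)) &
                      BITFIELD64_MASK(bit_size);

       b->cursor = nir_before_instr(&alu->instr);
       nir_ssa_def *imm = nir_imm_intN_t(b, sum, bit_size);

       nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(imm));
       nir_instr_remove(&alu->instr);
       return true;
    }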
|
D | nir_gather_ssa_types.c |
    76  src.ssa->parent_instr->type == nir_instr_type_ssa_undef;  in copy_types()
    77  copy_type(src.ssa->index, dest->ssa.index, src_is_sink, float_types, progress);  in copy_types()
    78  copy_type(src.ssa->index, dest->ssa.index, src_is_sink, int_types, progress);  in copy_types()
   125  set_type(alu->src[0].src.ssa->index, nir_type_bool,  in nir_gather_ssa_types()
   136  set_type(alu->src[i].src.ssa->index, info->input_types[i],  in nir_gather_ssa_types()
   139  set_type(alu->dest.dest.ssa.index, info->output_type,  in nir_gather_ssa_types()
   149  set_type(tex->src[i].src.ssa->index,  in nir_gather_ssa_types()
   154  set_type(tex->dest.ssa.index, tex->dest_type,  in nir_gather_ssa_types()
   169  set_type(intrin->dest.ssa.index,  in nir_gather_ssa_types()
   179  set_type(intrin->src[1].ssa->index,  in nir_gather_ssa_types()
   [all …]
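The pass keeps two bitsets indexed by SSA index — "used as float" and "used as int" — seeds them at typed instructions (ALU ops, texture sources, typed intrinsics), and propagates the flags across untyped copies like movs and phis until nothing changes. A sketch of the forward half of that propagation, a hypothetical helper in the spirit of copy_type() above (the real one also propagates backwards into "sink" sources):

    #include "nir.h"
    #include "util/bitset.h"

    /* Union the src's flag into the dest's flag; report progress so the
     * caller can iterate to a fixed point. */
    static void
    copy_flag(unsigned src_idx, unsigned dst_idx,
              BITSET_WORD *set, bool *progress)
    {
       if (BITSET_TEST(set, src_idx) && !BITSET_TEST(set, dst_idx)) {
          BITSET_SET(set, dst_idx);
          *progress = true;
       }
    }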
|
D | nir_opt_copy_prop_vars.c |
    69  } ssa;  member
    79  memset(&value->ssa, 0, sizeof(value->ssa));  in value_set_ssa_components()
    82  value->ssa.def[i] = def;  in value_set_ssa_components()
    83  value->ssa.component[i] = i;  in value_set_ssa_components()
   115  (value->ssa.def[i] != intrin->src[1].ssa ||  in value_equals_store_src()
   116  value->ssa.component[i] != i))  in value_equals_store_src()
   457  memset(&value->ssa, 0, sizeof(value->ssa));  in value_set_from_value()
   462  value->ssa.def[base_index + i] = from->ssa.def[i];  in value_set_from_value()
   463  value->ssa.component[base_index + i] = from->ssa.component[i];  in value_set_from_value()
   485  if (!entry->src.ssa.def[index])  in load_element_from_ssa_entry_value()
   [all …]
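Copy propagation for variables tracks, per stored component, which SSA def (and which component of it) currently holds the value, so a later load can be satisfied without touching memory. A sketch of that bookkeeping with field names borrowed from the hits — the real struct also tracks deref-valued entries:

    #include <string.h>
    #include "nir.h"

    struct value {
       struct {
          nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];  /* who holds comp i */
          uint8_t component[NIR_MAX_VEC_COMPONENTS]; /* which of its comps */
       } ssa;
    };

    /* Record that components [0, num_components) of a variable now hold
     * the corresponding components of `def`. */
    static void
    value_set_ssa_components(struct value *value, nir_ssa_def *def,
                             unsigned num_components)
    {
       memset(&value->ssa, 0, sizeof(value->ssa));
       for (unsigned i = 0; i < num_components; i++) {
          value->ssa.def[i] = def;
          value->ssa.component[i] = i;
       }
    }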
|
D | nir_lower_bit_size.c |
    52  unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;  in lower_alu_instr()
    92  nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(dst));  in lower_alu_instr()
    94  nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(lowered_dst));  in lower_alu_instr()
   119  const unsigned old_bit_size = intrin->dest.ssa.bit_size;  in lower_intrinsic_instr()
   132  nir_ssa_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,  in lower_intrinsic_instr()
   139  assert(new_intrin->dest.ssa.bit_size == 1);  in lower_intrinsic_instr()
   144  assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);  in lower_intrinsic_instr()
   145  new_intrin->dest.ssa.bit_size = bit_size;  in lower_intrinsic_instr()
   150  nir_ssa_def *res = &new_intrin->dest.ssa;  in lower_intrinsic_instr()
   178  nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(res));  in lower_intrinsic_instr()
   [all …]
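lower_alu_instr() widens an unsupported small bit size: convert the sources up, perform the op at the wide size, then narrow the result back. A sketch of that flow for a plain integer op, under the simplifying assumption that unsigned conversions suffice (comparisons, shifts, and float ops all need the extra care the real pass takes):

    #include "nir.h"
    #include "nir_builder.h"

    static nir_ssa_def *
    widen_int_alu(nir_builder *b, nir_alu_instr *alu, unsigned bit_size)
    {
       const unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;
       nir_ssa_def *srcs[4] = { NULL, NULL, NULL, NULL };

       b->cursor = nir_before_instr(&alu->instr);

       /* Convert every source up to the supported bit size. */
       for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
          nir_ssa_def *src = nir_ssa_for_alu_src(b, alu, i);
          srcs[i] = nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
       }

       /* Run the op wide, then truncate back to the original size. */
       nir_ssa_def *wide = nir_build_alu(b, alu->op, srcs[0], srcs[1],
                                         srcs[2], srcs[3]);
       return nir_convert_to_bit_size(b, wide, nir_type_uint, dst_bit_size);
    }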
|
D | nir_opt_vectorize.c |
    36  void *hash_data = nir_src_is_const(*src) ? NULL : src->ssa;  in hash_src()
    56  hash = HASH(hash, instr->dest.dest.ssa.bit_size);  in hash_alu()
    83  return src1->ssa == src2->ssa ||  in srcs_equal()
   109  if (alu1->dest.dest.ssa.bit_size != alu2->dest.dest.ssa.bit_size)  in instrs_equal()
   173  assert(alu1->dest.dest.ssa.bit_size == alu2->dest.dest.ssa.bit_size);  in instr_try_combine()
   174  unsigned alu1_components = alu1->dest.dest.ssa.num_components;  in instr_try_combine()
   175  unsigned alu2_components = alu2->dest.dest.ssa.num_components;  in instr_try_combine()
   182  (total_components > 2 || alu1->dest.dest.ssa.bit_size != 16))  in instr_try_combine()
   194  total_components, alu1->dest.dest.ssa.bit_size, NULL);  in instr_try_combine()
   210  if (alu1->src[i].src.ssa != alu2->src[i].src.ssa) {  in instr_try_combine()
   [all …]
|
D | nir_opt_intrinsics.c |
    42  if (!list_is_empty(&shuffle->dest.ssa.if_uses) ||  in src_is_single_use_shuffle()
    43  !list_is_singular(&shuffle->dest.ssa.uses))  in src_is_single_use_shuffle()
    49  *data = shuffle->src[0].ssa;  in src_is_single_use_shuffle()
    50  *index = shuffle->src[1].ssa;  in src_is_single_use_shuffle()
    76  nir_ssa_def *index = nir_bcsel(b, alu->src[0].src.ssa, index1, index2);  in try_opt_bcsel_of_shuffle()
    81  shuffle->num_components = alu->dest.dest.ssa.num_components;  in try_opt_bcsel_of_shuffle()
    83  alu->dest.dest.ssa.num_components,  in try_opt_bcsel_of_shuffle()
    84  alu->dest.dest.ssa.bit_size, NULL);  in try_opt_bcsel_of_shuffle()
    87  return &shuffle->dest.ssa;  in try_opt_bcsel_of_shuffle()
   105  nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,  in opt_intrinsics_alu()
   [all …]
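try_opt_bcsel_of_shuffle() turns bcsel(c, shuffle(x, i1), shuffle(x, i2)) into shuffle(x, bcsel(c, i1, i2)): when both shuffles read the same data and have no other users, it is cheaper to select between the indices and shuffle once. A sketch of the rewrite as a hypothetical helper, assuming the single-use checks from lines 42-50 have already passed:

    #include "nir.h"
    #include "nir_builder.h"

    static nir_ssa_def *
    combine_shuffles(nir_builder *b, nir_alu_instr *bcsel,
                     nir_intrinsic_instr *shuf1, nir_intrinsic_instr *shuf2)
    {
       assert(shuf1->src[0].ssa == shuf2->src[0].ssa);  /* same data */

       b->cursor = nir_before_instr(&bcsel->instr);

       /* Select between the two invocation indices instead. */
       nir_ssa_def *index = nir_bcsel(b, bcsel->src[0].src.ssa,
                                      shuf1->src[1].ssa, shuf2->src[1].ssa);

       nir_intrinsic_instr *shuffle =
          nir_intrinsic_instr_create(b->shader, nir_intrinsic_shuffle);
       shuffle->src[0] = nir_src_for_ssa(shuf1->src[0].ssa);
       shuffle->src[1] = nir_src_for_ssa(index);
       shuffle->num_components = bcsel->dest.dest.ssa.num_components;
       nir_ssa_dest_init(&shuffle->instr, &shuffle->dest,
                         shuffle->num_components,
                         bcsel->dest.dest.ssa.bit_size, NULL);
       nir_builder_instr_insert(b, &shuffle->instr);

       return &shuffle->dest.ssa;  /* caller rewrites the bcsel's uses */
    }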
|
D | nir_lower_phis_to_scalar.c |
    57  nir_instr *src_instr = src->src.ssa->parent_instr;  in is_phi_src_scalarizable()
   146  if (phi->dest.ssa.num_components == 1)  in should_lower_phi()
   209  unsigned bit_size = phi->dest.ssa.bit_size;  in lower_phis_to_scalar_block()
   215  nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components);  in lower_phis_to_scalar_block()
   219  phi->dest.ssa.num_components,  in lower_phis_to_scalar_block()
   221  vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;  in lower_phis_to_scalar_block()
   223  for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {  in lower_phis_to_scalar_block()
   226  phi->dest.ssa.bit_size, NULL);  in lower_phis_to_scalar_block()
   228  vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);  in lower_phis_to_scalar_block()
   248  new_src->src = nir_src_for_ssa(&mov->dest.dest.ssa);  in lower_phis_to_scalar_block()
   [all …]
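lower_phis_to_scalar_block() replaces a vecN phi with N scalar phis plus a vecN instruction that reassembles the value after the phi section. A structural sketch only — the per-predecessor sources (one mov per incoming edge, line 248) are elided, so the new phis are not yet valid until the caller fills them in:

    #include "nir.h"
    #include "nir_builder.h"

    static nir_ssa_def *
    scalarize_phi(nir_builder *b, nir_phi_instr *phi)
    {
       const unsigned n = phi->dest.ssa.num_components;
       nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];

       for (unsigned i = 0; i < n; i++) {
          nir_phi_instr *new_phi = nir_phi_instr_create(b->shader);
          nir_ssa_dest_init(&new_phi->instr, &new_phi->dest, 1,
                            phi->dest.ssa.bit_size, NULL);
          /* ...one source per predecessor, channel i of the old src... */
          nir_instr_insert_before(&phi->instr, &new_phi->instr);
          comps[i] = &new_phi->dest.ssa;
       }

       /* The reassembling vecN must come after all phis in the block. */
       b->cursor = nir_after_phis(phi->instr.block);
       return nir_vec(b, comps, n);
    }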
|
D | nir_opt_rematerialize_compares.c |
    78  nir_foreach_use(use, &instr->dest.dest.ssa) {  in all_uses_are_bcsel()
    90  if (alu->src[0].src.ssa != &instr->dest.dest.ssa)  in all_uses_are_bcsel()
   127  nir_foreach_use_safe(use, &alu->dest.dest.ssa) {  in nir_opt_rematerialize_compares_impl()
   142  if (use_alu->src[i].src.ssa == &alu->dest.dest.ssa) {  in nir_opt_rematerialize_compares_impl()
   145  nir_src_for_ssa(&clone->dest.dest.ssa));  in nir_opt_rematerialize_compares_impl()
   151  nir_foreach_if_use_safe(use, &alu->dest.dest.ssa) {  in nir_opt_rematerialize_compares_impl()
   168  nir_src_for_ssa(&clone->dest.dest.ssa));  in nir_opt_rematerialize_compares_impl()
|
D | nir_opt_if.c |
   162  nir_ssa_def *cond = nif->condition.ssa;  in opt_peel_loop_initial_if()
   312  bcsel->src[i].src.ssa->parent_instr->block != instr->block)  in is_trivial_bcsel()
   315  if (bcsel->src[i].src.ssa->parent_instr->type != nir_instr_type_phi) {  in is_trivial_bcsel()
   323  nir_foreach_phi_src(src, nir_instr_as_phi(bcsel->src[0].src.ssa->parent_instr)) {  in is_trivial_bcsel()
   435  nir_instr *const src_instr = alu->src[i].src.ssa->parent_instr;  in opt_split_alu_of_phi()
   455  if (src_of_phi->src.ssa->parent_instr->type !=  in opt_split_alu_of_phi()
   460  if (src_of_phi->src.ssa->parent_instr->type !=  in opt_split_alu_of_phi()
   465  prev_srcs[i] = src_of_phi->src.ssa;  in opt_split_alu_of_phi()
   468  continue_srcs[i] = src_of_phi->src.ssa;  in opt_split_alu_of_phi()
   482  prev_srcs[i] = alu->src[i].src.ssa;  in opt_split_alu_of_phi()
   [all …]
|
D | nir_deref.c |
    38  cast->dest.ssa.num_components == parent->dest.ssa.num_components &&  in is_trivial_deref_cast()
    39  cast->dest.ssa.bit_size == parent->dest.ssa.bit_size;  in is_trivial_deref_cast()
   113  if (!list_is_empty(&d->dest.ssa.uses))  in nir_deref_instr_remove_if_unused()
   159  nir_foreach_use(use_src, &deref->dest.ssa) {  in nir_deref_instr_has_complex_use()
   228  nir_foreach_if_use(use, &deref->dest.ssa)  in nir_deref_instr_has_complex_use()
   334  nir_ssa_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);  in nir_build_deref_offset()
   419  nir_instr_as_deref(deref->parent.ssa->parent_instr);  in nir_fixup_deref_modes()
   582  } else if (a_tail->arr.index.ssa == b_tail->arr.index.ssa) {  in nir_compare_deref_paths()
   677  new_deref->parent = nir_src_for_ssa(&parent->dest.ssa);  in rematerialize_deref_in_block()
   708  deref->dest.ssa.num_components,  in rematerialize_deref_in_block()
   [all …]
|
D | nir_opt_uniform_atomics.c |
   144  nir_ssa_scalar cond = {nir_cf_node_as_if(cf)->condition.ssa, 0};  in is_atomic_already_optimized()
   162  return &intrin->dest.ssa;  in emit_scalar_intrinsic()
   176  return &ri->dest.ssa;  in emit_read_invocation()
   194  *scan = &intrin->dest.ssa;  in reduce_data()
   197  *scan = &intrin->dest.ssa;  in reduce_data()
   202  *reduce = &intrin->dest.ssa;  in reduce_data()
   211  nir_ssa_def *data = intrin->src[data_src].ssa;  in optimize_atomic()
   231  nir_ssa_def *undef = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);  in optimize_atomic()
   234  nir_ssa_def *result = nir_if_phi(b, &intrin->dest.ssa, undef);  in optimize_atomic()
   256  ASSERTED bool original_result_divergent = intrin->dest.ssa.divergent;  in optimize_and_rewrite_atomic()
   [all …]
|
/external/mesa3d/src/intel/compiler/ |
D | brw_nir_opt_peephole_ffma.c |
    57  if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))  in are_all_uses_fadd()
    76  nir_instr *instr = src->src.ssa->parent_instr;  in get_mul_for_src()
    94  alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,  in get_mul_for_src()
    99  alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,  in get_mul_for_src()
   105  alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,  in get_mul_for_src()
   116  if (!are_all_uses_fadd(&alu->dest.dest.ssa))  in get_mul_for_src()
   151  if (srcs[i].src.ssa->parent_instr->type == nir_instr_type_load_const) {  in any_alu_src_is_a_constant()
   153  nir_instr_as_load_const (srcs[i].src.ssa->parent_instr);  in any_alu_src_is_a_constant()
   189  if (add->src[0].src.ssa == add->src[1].src.ssa)  in brw_nir_opt_peephole_ffma_block()
   203  add->dest.dest.ssa.num_components,  in brw_nir_opt_peephole_ffma_block()
   [all …]
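The peephole fuses fadd(fmul(a, b), c) into a single ffma(a, b, c) when the multiply is only consumed by adds. A bare-bones sketch of the rewrite — the real pass also walks through movs/negates, checks profitability, and folds source modifiers, all omitted here:

    #include "nir.h"
    #include "nir_builder.h"

    static bool
    fuse_ffma(nir_builder *b, nir_alu_instr *add)
    {
       if (add->op != nir_op_fadd)
          return false;

       /* Assumes no swizzle/modifier on the add's first source. */
       nir_instr *mul_instr = add->src[0].src.ssa->parent_instr;
       if (mul_instr->type != nir_instr_type_alu ||
           nir_instr_as_alu(mul_instr)->op != nir_op_fmul)
          return false;

       nir_alu_instr *mul = nir_instr_as_alu(mul_instr);

       b->cursor = nir_before_instr(&add->instr);
       nir_ssa_def *ffma = nir_ffma(b,
                                    nir_ssa_for_alu_src(b, mul, 0),
                                    nir_ssa_for_alu_src(b, mul, 1),
                                    nir_ssa_for_alu_src(b, add, 1));

       nir_ssa_def_rewrite_uses(&add->dest.dest.ssa, nir_src_for_ssa(ffma));
       nir_instr_remove(&add->instr);
       return true;
    }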
|
/external/mesa3d/src/compiler/nir/tests/ |
D | load_store_vectorizer_tests.cpp |
    31  EXPECT_EQ((instr)->src[0].src.ssa, &(load)->dest.ssa); \
   192  res_map[binding] = &res->dest.ssa;  in get_resource()
   193  return &res->dest.ssa;  in get_resource()
   249  nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->dest.ssa)->parent_instr);  in create_indirect_load()
   318  load->src[0] = nir_src_for_ssa(&deref->dest.ssa);  in create_shared_load()
   320  nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->dest.ssa)->parent_instr);  in create_shared_load()
   337  store->src[0] = nir_src_for_ssa(&deref->dest.ssa);  in create_shared_store()
   358  if (alu->src[index].src.ssa != def)  in test_alu_def()
   400  ASSERT_EQ(load->dest.ssa.bit_size, 32);  in TEST_F()
   401  ASSERT_EQ(load->dest.ssa.num_components, 2);  in TEST_F()
   [all …]
|
/external/mesa3d/src/gallium/drivers/lima/ir/ |
D | lima_nir_duplicate_intrinsic.c |
    35  nir_foreach_use_safe(use_src, &itr->dest.ssa) {  in lima_nir_duplicate_intrinsic()
    46  dupl->src[0].ssa = itr->src[0].ssa;  in lima_nir_duplicate_intrinsic()
    51  dupl->num_components, itr->dest.ssa.bit_size,  in lima_nir_duplicate_intrinsic()
    52  itr->dest.ssa.name);  in lima_nir_duplicate_intrinsic()
    61  nir_instr_rewrite_src(use_src->parent_instr, use_src, nir_src_for_ssa(&dupl->dest.ssa));  in lima_nir_duplicate_intrinsic()
    69  nir_foreach_if_use_safe(use_src, &itr->dest.ssa) {  in lima_nir_duplicate_intrinsic()
    80  dupl->src[0].ssa = itr->src[0].ssa;  in lima_nir_duplicate_intrinsic()
    85  dupl->num_components, itr->dest.ssa.bit_size,  in lima_nir_duplicate_intrinsic()
    86  itr->dest.ssa.name);  in lima_nir_duplicate_intrinsic()
    95  nir_if_rewrite_condition(use_src->parent_if, nir_src_for_ssa(&dupl->dest.ssa));  in lima_nir_duplicate_intrinsic()
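The Lima pass clones a load-style intrinsic once per consumer, so every use reads its own copy placed directly in front of it. A sketch of the instruction-use half, assuming a single-source intrinsic as in the hits — the if-use half (lines 69-95) follows the same pattern with nir_if_rewrite_condition():

    #include <string.h>
    #include "nir.h"

    static void
    duplicate_per_use(nir_shader *shader, nir_intrinsic_instr *itr)
    {
       nir_foreach_use_safe(use_src, &itr->dest.ssa) {
          nir_intrinsic_instr *dupl =
             nir_intrinsic_instr_create(shader, itr->intrinsic);
          dupl->num_components = itr->num_components;
          dupl->src[0] = nir_src_for_ssa(itr->src[0].ssa);
          /* Carry over base/offset/etc. constant indices. */
          memcpy(dupl->const_index, itr->const_index,
                 sizeof(itr->const_index));
          nir_ssa_dest_init(&dupl->instr, &dupl->dest, dupl->num_components,
                            itr->dest.ssa.bit_size, itr->dest.ssa.name);

          /* Drop the clone right before its single consumer... */
          nir_instr_insert_before(use_src->parent_instr, &dupl->instr);
          /* ...and point that consumer at the clone. */
          nir_instr_rewrite_src(use_src->parent_instr, use_src,
                                nir_src_for_ssa(&dupl->dest.ssa));
       }
    }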
|
D | lima_nir_split_load_input.c |
    48  nir_ssa_def *ssa = alu->src[0].src.ssa;  in lima_nir_split_load_input_block() local
    49  if (ssa->parent_instr->type != nir_instr_type_intrinsic)  in lima_nir_split_load_input_block()
    52  nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(ssa->parent_instr);  in lima_nir_split_load_input_block()
    76  ssa->bit_size,  in lima_nir_split_load_input_block()
    87  nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,  in lima_nir_split_load_input_block()
    88  nir_src_for_ssa(&new_intrin->dest.ssa));  in lima_nir_split_load_input_block()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Util/PredicateInfo/ |
D | testandor.ll |
    13  ; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.{{.+}}(i32 [[X]])
    14  ; CHECK: [[Y_0:%.*]] = call i32 @llvm.ssa.copy.{{.+}}(i32 [[Y]])
    15  ; CHECK: [[XZ_0:%.*]] = call i1 @llvm.ssa.copy.{{.+}}(i1 [[XZ]])
    16  ; CHECK: [[YZ_0:%.*]] = call i1 @llvm.ssa.copy.{{.+}}(i1 [[YZ]])
    17  ; CHECK: [[Z_0:%.*]] = call i1 @llvm.ssa.copy.{{.+}}(i1 [[Z]])
    57  ; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.{{.+}}(i32 [[X]])
    58  ; CHECK: [[Y_0:%.*]] = call i32 @llvm.ssa.copy.{{.+}}(i32 [[Y]])
    59  ; CHECK: [[XZ_0:%.*]] = call i1 @llvm.ssa.copy.{{.+}}(i1 [[XZ]])
    60  ; CHECK: [[YZ_0:%.*]] = call i1 @llvm.ssa.copy.{{.+}}(i1 [[YZ]])
    61  ; CHECK: [[Z_0:%.*]] = call i1 @llvm.ssa.copy.{{.+}}(i1 [[Z]])
    [all …]
|
/external/mesa3d/src/gallium/drivers/r600/sfn/ |
D | sfn_nir_lower_tess_io.cpp |
    33  return &result->dest.ssa;  in emit_load_param_base()
    94  op->src[0].ssa, addr);  in emil_lsd_in_addr()
   100  offset = nir_iadd(b, offset, nir_ishl(b, op->src[1].ssa, nir_imm_int(b, 4)));  in emil_lsd_in_addr()
   113  op->src[src_offset].ssa, addr1);  in emil_lsd_out_addr()
   117  nir_ishl(b, op->src[src_offset + 1].ssa, nir_imm_int(b,4))),  in emil_lsd_out_addr()
   149  nir_ssa_def_rewrite_uses(&op->dest.ssa, nir_src_for_ssa(&load_tcs_in->dest.ssa));  in replace_load_instr()
   161  return &patch_id->dest.ssa;  in r600_load_rel_patch_id()
   175  store_tcs_out->src[0] = nir_src_for_ssa(op->src[0].ssa);  in emit_store_lds()
   176  store_tcs_out->num_components = store_tcs_out->src[0].ssa->num_components;  in emit_store_lds()
   192  nir_ishl(b, op->src[src_offset].ssa, nir_imm_int(b,4))),  in emil_tcs_io_offset()
   [all …]
|
D | sfn_valuepool.cpp |
    81  << (v.is_ssa ? v.ssa->index : v.reg.reg->index);  in from_nir()
   102  unsigned index = v.ssa->index;  in from_nir()
   119  switch (v.ssa->bit_size) {  in from_nir()
   125  sfn_log << SfnLog::reg << "Unsupported bit size " << v.ssa->bit_size  in from_nir()
   174  int idx = src.is_ssa ? get_dst_ssa_register_index(*src.ssa):  in create_register_from_nir_src()
   191  return dst.is_ssa ? get_dst_ssa_register_index(dst.ssa):  in lookup_register_index()
   200  get_ssa_register_index(*src.ssa) :  in lookup_register_index()
   230  sfn_log << "ssa_" << v.ssa.index;  in from_nir()
   341  unsigned ValuePool::get_dst_ssa_register_index(const nir_ssa_def& ssa)  in get_dst_ssa_register_index() argument
   344  << ssa.index;  in get_dst_ssa_register_index()
   [all …]
|
/external/mesa3d/src/freedreno/ir3/ |
D | ir3_nir_lower_tex_prefetch.c |
    32  coord_offset(nir_ssa_def *ssa)  in coord_offset() argument
    34  nir_instr *parent_instr = ssa->parent_instr;  in coord_offset()
    51  int base_offset = coord_offset(alu->src[0].src.ssa) +  in coord_offset()
    59  int nth_offset = coord_offset(alu->src[i].src.ssa) +  in coord_offset()
    84  nir_instr_as_intrinsic(input->src[0].ssa->parent_instr);  in coord_offset()
   100  ir3_nir_coord_offset(nir_ssa_def *ssa)  in ir3_nir_coord_offset() argument
   103  assert (ssa->num_components == 2);  in ir3_nir_coord_offset()
   104  return coord_offset(ssa);  in ir3_nir_coord_offset()
   190  if (ir3_nir_coord_offset(coord->src.ssa) >= 0) {  in lower_tex_prefetch_block()
|
D | ir3_group.c |
    36  struct ir3_instruction *src = ssa(collect->regs[idx+1]);  in insert_mov()
    85  struct ir3_instruction *instr = ssa(regs[i]);  in group_collect()
    87  struct ir3_instruction *left = (i > 0) ? ssa(regs[i - 1]) : NULL;  in group_collect()
    88  struct ir3_instruction *right = (i < (n-1)) ? ssa(regs[i + 1]) : NULL;  in group_collect()
   105  if (in_neighbor_list(ssa(regs[j]), instr, i))  in group_collect()
   123  struct ir3_instruction *instr = ssa(regs[i]);  in group_collect()
   125  struct ir3_instruction *left = (i > 0) ? ssa(regs[i - 1]) : NULL;  in group_collect()
   126  struct ir3_instruction *right = (i < (n-1)) ? ssa(regs[i + 1]) : NULL;  in group_collect()
|