/external/mesa3d/src/compiler/spirv/ |
D | vtn_opencl.c
      36  struct vtn_type **src_types,
      56  int ntypes, struct vtn_type **src_types,  in vtn_opencl_mangle() argument
      63  const struct glsl_type *type = src_types[i]->type;  in vtn_opencl_mangle()
      64  enum vtn_base_type base_type = src_types[i]->base_type;  in vtn_opencl_mangle()
      65  if (src_types[i]->base_type == vtn_base_type_pointer) {  in vtn_opencl_mangle()
      67  int address_space = to_llvm_address_space(src_types[i]->storage_class);  in vtn_opencl_mangle()
      71  type = src_types[i]->deref->type;  in vtn_opencl_mangle()
      72  base_type = src_types[i]->deref->base_type;  in vtn_opencl_mangle()
      86  const struct glsl_type *other_type = src_types[j]->base_type == vtn_base_type_pointer ?  in vtn_opencl_mangle()
      87  src_types[j]->deref->type : src_types[j]->type;  in vtn_opencl_mangle()
      [all …]
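The vtn_opencl_mangle() hits above all exercise one pattern: each source type is inspected, and pointer arguments are unwrapped through ->deref so the mangled name is built from the pointee type plus an address space derived from the SPIR-V storage class. A minimal stand-alone sketch of that unwrap step follows; the struct layout and the address-space mapping are simplified stand-ins for illustration, not the real vtn_type / to_llvm_address_space() definitions.

/* Simplified stand-ins; the real types live in vtn_private.h. */
enum fake_base_type { FAKE_BASE_SCALAR, FAKE_BASE_POINTER };

struct fake_vtn_type {
        enum fake_base_type base_type;
        int storage_class;                   /* SpvStorageClass in the real code */
        const struct fake_vtn_type *deref;   /* pointee, only set for pointers */
};

/* Hypothetical stand-in for to_llvm_address_space(). */
static int fake_address_space(int storage_class)
{
        return storage_class ? 1 : 0;
}

/* Mirrors the shape of the hits at lines 63-72: mangle against the pointee
 * when the source type is a pointer, and record its address space. */
static const struct fake_vtn_type *
fake_mangle_type(const struct fake_vtn_type *src, int *address_space)
{
        *address_space = 0;

        if (src->base_type == FAKE_BASE_POINTER) {
                *address_space = fake_address_space(src->storage_class);
                return src->deref;
        }

        return src;
}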
|
/external/mesa3d/src/panfrost/bifrost/ |
D | bi_special.c
      51  .src_types = {  in bi_emit_fexp2_new()
      72  .src_types = { nir_type_float32 },  in bi_emit_fexp2_new()
      84  .src_types = { nir_type_int32, nir_type_float32 },  in bi_emit_fexp2_new()
     105  .src_types = { nir_type_float32 },  in bi_emit_flog2_new()
     115  .src_types = { nir_type_int32 },  in bi_emit_flog2_new()
     129  .src_types = { nir_type_float32, nir_type_float32 },  in bi_emit_flog2_new()
     143  .src_types = { nir_type_float32 },  in bi_emit_flog2_new()
     157  .src_types = {  in bi_emit_flog2_new()
|
D | bifrost_compile.c
      85  .src_types = { nir_type_uint32 },  in bi_load()
     129  .src_types = { nir_type_uint32, nir_type_uint32, nir_type_uint32 },  in bi_emit_ld_output()
     202  .src_types = {  in bi_emit_ld_blend_input()
     231  .src_types = { nir_type_uint32, T },  in bi_emit_atest()
     253  .src_types = {  in bi_emit_blend()
     289  assert(blend.src_types[0]);  in bi_emit_blend()
     290  ctx->blend_types[blend.blend_location] = blend.src_types[0];  in bi_emit_blend()
     305  .src_types = {  in bi_emit_zs_emit()
     380  ld.src_types[1] = nir_type_uint32;  in bi_load_with_r61()
     381  ld.src_types[2] = nir_type_uint32;  in bi_load_with_r61()
      [all …]
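Across the Bifrost files above (and bi_special.c / bi_lower_combine.c), src_types follows one idiom: every IR instruction carries a per-source nir_alu_type array, filled with designated initializers when the instruction is emitted and queried later with nir_alu_type_get_base_type() / nir_alu_type_get_type_size(). Below is a minimal sketch of that idiom, assuming Mesa's NIR headers are on the include path; the struct is a cut-down stand-in, not the real bi_instruction.

#include <assert.h>
#include "nir.h"   /* nir_alu_type, nir_type_*, nir_alu_type_get_*() */

struct fake_bi_instruction {
        nir_alu_type dest_type;
        nir_alu_type src_types[3];   /* one entry per source operand */
};

static struct fake_bi_instruction fake_emit_ld_output(void)
{
        /* Same shape as the bi_emit_ld_output() hit at line 129: three
         * untyped 32-bit sources. */
        struct fake_bi_instruction ld = {
                .dest_type = nir_type_uint32,
                .src_types = { nir_type_uint32, nir_type_uint32, nir_type_uint32 },
        };
        return ld;
}

static void fake_query(const struct fake_bi_instruction *ins)
{
        /* Later passes (bir.c, bi_pack.c) branch on the base type and size
         * recovered from the same array. */
        assert(nir_alu_type_get_base_type(ins->src_types[0]) == nir_type_uint);
        assert(nir_alu_type_get_type_size(ins->src_types[0]) == 32);
}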
|
D | bir.c
      47  bool floaty = nir_alu_type_get_base_type(ins->src_types[0]) == nir_type_float;  in bi_has_source_mods()
     109  unsigned src_bytes = nir_alu_type_get_type_size(ins->src_types[MAX2(src, 0)]);  in bi_get_component_count()
     134  nir_alu_type T = ins->src_types[s];  in bi_bytemask_of_read_components()
     157  unsigned sz = nir_alu_type_get_type_size(ins->src_types[index]);  in bi_get_immediate()
|
D | bi_schedule.c
     116  bool ic = nir_alu_type_get_base_type(ins->src_types[0]) != nir_type_float;  in bi_icmp()
     124  bool sz = nir_alu_type_get_type_size(ins->src_types[0]) < 32;  in bi_imath_small()
     141  ins->src_types[1] = ins->src_types[0];  in bi_lower_fmov()
|
D | bi_pack.c
     469  bool src0_f16 = bundle.fma->src_types[0] == nir_type_float16;  in bi_pack_fma()
     470  bool src0_f32 = bundle.fma->src_types[0] == nir_type_float32;  in bi_pack_fma()
     471  bool src0_u16 = bundle.fma->src_types[0] == nir_type_uint16;  in bi_pack_fma()
     472  bool src0_s16 = bundle.fma->src_types[0] == nir_type_int16;  in bi_pack_fma()
     473  bool src0_s8 = bundle.fma->src_types[0] == nir_type_int8;  in bi_pack_fma()
     474  bool src0_u8 = bundle.fma->src_types[0] == nir_type_uint8;  in bi_pack_fma()
     569  if (nir_alu_type_get_type_size(bundle.fma->src_types[0]) == 16) {  in bi_pack_fma()
     572  assert(nir_alu_type_get_type_size(bundle.fma->src_types[0]) == 8);  in bi_pack_fma()
     601  unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);  in bi_pack_add_branch_cond()
     687  assert(ins->src_types[0] == nir_type_int32);  in bi_pack_add_special()
      [all …]
|
D | bi_lower_combine.c
      57  .src_types = { nir_type_uint32 },  in bi_combine_mov32()
      73  .src_types = { nir_type_uint16, nir_type_uint16 },  in bi_combine_sel16()
     110  .src_types = { nir_type_uint32 },  in bi_combine_copy()
|
D | bi_ra.c
     134  nir_alu_type T = ins->src_types[src];  in bi_adjust_src_ra()
     198  .src_types = {  in bi_spill()
     222  .src_types = {  in bi_fill()
|
/external/mesa3d/src/panfrost/bifrost/test/ |
D | bi_interpret.c
     403  srcs[src].u64 = bit_read(s, ins, ins->src[src], ins->src_types[src], FMA);  in bit_step()
     408  if (ins->src_types[src] == nir_type_float16) {  in bit_step()
     414  } else if (ins->src_types[src] == nir_type_float32) {  in bit_step()
     433  nir_alu_type T = ins->src_types[0];  in bit_step()
     485  dest.f32 = bit_as_float32(ins->src_types[0], srcs[0], comp);  in bit_step()
     487  dest.u32 = bit_as_uint32(ins->src_types[0], srcs[0], comp, ins->roundmode);  in bit_step()
     489  dest.i32 = bit_as_int32(ins->src_types[0], srcs[0], comp, ins->roundmode);  in bit_step()
     491  … dest.u16[0] = bit_as_float16(ins->src_types[0], srcs[0], ins->swizzle[0][0]);  in bit_step()
     492  … dest.u16[1] = bit_as_float16(ins->src_types[0], srcs[0], ins->swizzle[0][1]);  in bit_step()
     494  … dest.u16[0] = bit_as_uint16(ins->src_types[0], srcs[0], ins->swizzle[0][0], ins->roundmode);  in bit_step()
      [all …]
|
D | bi_test_pack.c
      54  .src_types = {  in bit_test_single()
      75  .src_types = {  in bit_test_single()
      89  .src_types = {  in bit_test_single()
     191  ins.src_types[i] = T;  in bit_ins()
     265  ins.src_types[3] = nir_type_int32;  in bit_fma_mscale_helper()
     378  ins.src_types[c] = nir_type_uint | size;  in bit_select_helper()
     489  ins.src_types[0] = Ts[from_base] | from_size;  in bit_convert_helper()
     495  ins.src_types[1] = ins.src_types[0];  in bit_convert_helper()
     498  ins.src[1] = ins.src_types[1] = 0;  in bit_convert_helper()
     546  ins.src_types[2] = nir_type_uint8;  in bit_bitwise_helper()
|
/external/mesa3d/prebuilt-intermediates/bifrost/ |
D | bi_generated_pack.h
      41  unsigned lane2_sz = nir_alu_type_get_type_size(ins->src_types[2]);  in pan_pack_fma_rshift_and_i32()
      68  unsigned lanes1_sz = nir_alu_type_get_type_size(ins->src_types[1]);  in pan_pack_add_iadd_u32()
     191  unsigned swz0_sz = nir_alu_type_get_type_size(ins->src_types[0]);  in pan_pack_fma_clz_v2u16()
     221  unsigned lane1_sz = nir_alu_type_get_type_size(ins->src_types[1]);  in pan_pack_add_fatan_table_f16()
     229  unsigned lane0_sz = nir_alu_type_get_type_size(ins->src_types[0]);  in pan_pack_add_fatan_table_f16()
     251  unsigned lane2_sz = nir_alu_type_get_type_size(ins->src_types[2]);  in pan_pack_fma_rrot_double_i32()
     294  unsigned lanes2_sz = nir_alu_type_get_type_size(ins->src_types[2]);  in pan_pack_fma_lshift_xor_v4i8()
     351  unsigned widen0_sz = nir_alu_type_get_type_size(ins->src_types[0]);  in pan_pack_add_fround_f32()
     390  unsigned swz0_sz = nir_alu_type_get_type_size(ins->src_types[0]);  in pan_pack_add_logb_v2f16()
     426  unsigned lanes2_sz = nir_alu_type_get_type_size(ins->src_types[2]);  in pan_pack_fma_arshift_v4i8()
      [all …]
|
/external/selinux/python/sepolgen/tests/ |
D | test_refpolicy.py
     157  self.assertTrue(isinstance(a.src_types, set))
     164  a.src_types.add("foo_t")
     171  a.src_types.add("user_t")
     187  self.assertIsInstance(a.src_types, set)
     217  self.assertEqual(a.src_types, {"foo"})
     233  self.assertEqual(a.src_types, {"foo"})
     243  a.src_types.to_space_str = lambda: "second"
     256  self.assertTrue(isinstance(a.src_types, set))
     263  a.src_types.add("foo_t")
     275  a.src_types.add("foo_t")
      [all …]
|
D | test_policygen.py
     106  self.assertEqual(av_rule1.src_types, {"test_src_t"})
     112  self.assertEqual(av_ext_rule1.src_types, {"test_src_t"})
     122  self.assertEqual(av_rule2.src_types, {"test_src_t"})
     128  self.assertEqual(av_ext_rule2.src_types, {"test_src_t"})
|
D | test_access.py
     237  rule.src_types.add("foo")
     238  rule.src_types.add("baz")
     272  rule.src_types.add("foo")
     273  rule.src_types.add("baz")
|
/external/selinux/python/sepolgen/src/sepolgen/ |
D | output.py
      76  ret = id_set_cmp(a.src_types, b.src_types)
     101  return id_set_cmp([a.args[0]], b.src_types)
     106  return id_set_cmp(a.src_types, [b.args[0]])
     142  x = util.first(rule.src_types)
|
D | refpolicy.py
     524  self.src_types = IdSet()
     546  self.src_types.add(av.src_type)
     560  self.src_types.to_space_str(),
     582  self.src_types = IdSet()
     602  self.src_types.add(av.src_type)
     617  self.src_types.to_space_str(),
     636  self.src_types = IdSet()
     652  self.src_types.to_space_str(),
|
D | interfaces.py
     172  if extract_from_set(rule.src_types, refpolicy.SRC_TYPE):
     429  src_types = self.map_param(av.src_type, ifcall)
     430  if src_types is None:
     451  for src_type in src_types:
|
/external/mesa3d/src/panfrost/midgard/ |
D | mir.c
     129  if (ins->dest_type != ins->src_types[i]) return true;  in mir_nontrivial_mod()
     147  if (ins->dest_type != ins->src_types[1])  in mir_nontrivial_outmod()
     301  nir_alu_type_get_type_size(ins->src_types[i]));  in mir_bytemask_of_read_components_index()
     428  temp = ins->src_types[0];  in mir_flip()
     429  ins->src_types[0] = ins->src_types[1];  in mir_flip()
     430  ins->src_types[1] = temp;  in mir_flip()
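The mir_flip() hits (lines 428-430) show the invariant the Midgard passes rely on: whenever sources are reordered, src_types must be permuted in lockstep, or the per-source type information goes out of sync with the operands. A small sketch of that invariant, using a cut-down stand-in for midgard_instruction (the real mir_flip() swaps more per-source state than shown here):

#include "nir.h"   /* nir_alu_type */

struct fake_midgard_instruction {
        unsigned src[2];
        nir_alu_type src_types[2];
};

/* Swap the two sources of a commutable binary op; the types travel with
 * their operands, in the spirit of the mir_flip() hits above. */
static void fake_flip(struct fake_midgard_instruction *ins)
{
        unsigned tmp_src = ins->src[0];
        nir_alu_type tmp_type = ins->src_types[0];

        ins->src[0] = ins->src[1];
        ins->src_types[0] = ins->src_types[1];

        ins->src[1] = tmp_src;
        ins->src_types[1] = tmp_type;
}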
|
D | midgard_print.c
     117  unsigned sz = nir_alu_type_get_type_size(ins->src_types[src_idx]);  in mir_print_embedded_constant()
     152  if (ins->src[c] != ~0 && ins->src_types[c] != nir_type_invalid) { \
     153  pan_print_alu_type(ins->src_types[c], stdout); \
     154  mir_print_swizzle(ins->swizzle[c], ins->src_types[c]); \
|
D | midgard_emit.c
      52  unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);  in mir_pack_mod()
      56  mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :  in mir_pack_mod()
      97  bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;  in vector_to_scalar_alu()
      98  bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;  in vector_to_scalar_alu()
     294  unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);  in mir_pack_vector_srcs()
     300  ins->src_types[i], reg_mode_for_bitsize(base_size),  in mir_pack_vector_srcs()
     514  unsigned sz = nir_alu_type_get_type_size(ins->src_types[1]);  in load_store_from_instr()
     520  unsigned sz = nir_alu_type_get_type_size(ins->src_types[2]);  in load_store_from_instr()
     938  unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);  in emit_binary_bundle()
|
D | midgard_compile.c
     122  i.src_types[0] = T; \
     547  ins->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;  in mir_copy_src()
     943  ins.src_types[1] = nir_type_float32;  in emit_alu()
     955  ins.src_types[1] = nir_type_float16;  in emit_alu()
     965  ins.src_types[1] = ins.src_types[0];  in emit_alu()
    1082  ins.src_types[2] = nir_type_uint32;  in emit_ubo_read()
    1157  .src_types = { 0, 0, 0, type | bitsize },  in emit_atomic()
    1168  ins.src_types[1] = nir_type_uint | nir_src_bit_size(*src_offset);  in emit_atomic()
    1174  ins.src_types[2] = type | bitsize;  in emit_atomic()
    1222  ins.src_types[2] = nir_type_uint32;  in emit_varying_read()
      [all …]
|
D | midgard_address.c
     206  ins->src_types[2] = nir_type_uint | nir_src_bit_size(*offset);  in mir_set_offset()
     219  ins->src_types[1] = nir_type_uint | match.A.def->bit_size;  in mir_set_offset()
     226  ins->src_types[2] = nir_type_uint | match.B.def->bit_size;  in mir_set_offset()
|
D | midgard_schedule.c
     204  unsigned sz0 = nir_alu_type_get_type_size(ains->src_types[0]);  in mir_is_scalar()
     205  unsigned sz1 = nir_alu_type_get_type_size(ains->src_types[1]);  in mir_is_scalar()
     366  unsigned type_size = nir_alu_type_get_type_size(ins->src_types[src]) / 8;  in mir_adjust_constant()
     368  unsigned max_comp = mir_components_for_type(ins->src_types[src]);  in mir_adjust_constant()
     521  if (ins->src_types[0] != ins->src_types[1])  in mir_is_add_2()
    1131  branch->src_types[1] = sadd->dest_type;  in mir_schedule_alu()
    1157  vadd->src_types[0] = nir_type_uint32;  in mir_schedule_alu()
    1166  vadd->src_types[0] = nir_type_uint32;  in mir_schedule_alu()
|
D | midgard_ra.c
     283  m.src_types[1] = m.dest_type;  in mir_lower_special_reads()
     381  if (nir_alu_type_get_type_size(ins->src_types[v]) == 64)  in mir_is_64()
     457  unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);  in allocate_registers()
     667  util_logbase2(nir_alu_type_get_type_size(ins->src_types[i]) / 8);  in install_registers_instr()
     864  st.dest_type = st.src_types[1] = ins->dest_type;  in mir_spill_register()
|
D | compiler.h
     103  nir_alu_type src_types[MIR_SRC_COUNT];  member
     542  .src_types = { 0, nir_type_uint32 },  in v_mov()
     595  ins.src_types[0] = nir_type_uint32;  in v_load_store_scratch()
|