/external/rust/crates/libz-sys/src/zlib-ng/ |
D | compare258.c |
    12  static inline uint32_t compare256_c_static(const unsigned char *src0, const unsigned char *src1) {  in compare256_c_static() argument
    16      if (*src0 != *src1)  in compare256_c_static()
    17          return len + (*src0 == *src1);  in compare256_c_static()
    18      src0 += 1, src1 += 1, len += 1;  in compare256_c_static()
    19      if (*src0 != *src1)  in compare256_c_static()
    20          return len + (*src0 == *src1);  in compare256_c_static()
    21      src0 += 1, src1 += 1, len += 1;  in compare256_c_static()
    22      if (*src0 != *src1)  in compare256_c_static()
    23          return len + (*src0 == *src1);  in compare256_c_static()
    24      src0 += 1, src1 += 1, len += 1;  in compare256_c_static()
    [all …]
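For orientation, a minimal C sketch of the operation the excerpt above hand-unrolls: count how many leading bytes of two buffers match, capped at 256. The loop form and the name are illustrative assumptions, not zlib-ng's actual implementation (which unrolls the comparison as in the hit above).

#include <stdint.h>

/* Illustrative only: straightforward version of a 256-byte match-length
 * comparison; zlib-ng's compare256_c_static unrolls this loop by hand. */
static uint32_t compare256_sketch(const unsigned char *src0,
                                  const unsigned char *src1) {
    uint32_t len = 0;
    while (len < 256 && src0[len] == src1[len])
        len++;                     /* count matching leading bytes */
    return len;
}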
|
/external/llvm-project/llvm/lib/Target/Hexagon/ |
D | HexagonDepMapAsm2Intrin.td |
    14  def: Pat<(int_hexagon_C2_cmpeq IntRegs:$src1, IntRegs:$src2),
    15          (C2_tfrpr (C2_cmpeq IntRegs:$src1, IntRegs:$src2))>, Requires<[HasV5]>;
    16  def: Pat<(int_hexagon_C2_cmpgt IntRegs:$src1, IntRegs:$src2),
    17          (C2_tfrpr (C2_cmpgt IntRegs:$src1, IntRegs:$src2))>, Requires<[HasV5]>;
    18  def: Pat<(int_hexagon_C2_cmpgtu IntRegs:$src1, IntRegs:$src2),
    19          (C2_tfrpr (C2_cmpgtu IntRegs:$src1, IntRegs:$src2))>, Requires<[HasV5]>;
    20  def: Pat<(int_hexagon_C2_cmpeqp DoubleRegs:$src1, DoubleRegs:$src2),
    21          (C2_tfrpr (C2_cmpeqp DoubleRegs:$src1, DoubleRegs:$src2))>, Requires<[HasV5]>;
    22  def: Pat<(int_hexagon_C2_cmpgtp DoubleRegs:$src1, DoubleRegs:$src2),
    23          (C2_tfrpr (C2_cmpgtp DoubleRegs:$src1, DoubleRegs:$src2))>, Requires<[HasV5]>;
    [all …]
|
D | HexagonMapAsm2IntrinV65.gen.td |
    9   …at<(int_hexagon_A6_vcmpbeq_notany DoubleRegs:$src1, DoubleRegs:$src2), (A6_vcmpbeq_notany DoubleRe…
    10  …ef: Pat<(int_hexagon_V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat …
    11  …at<(int_hexagon_V6_vasruwuhsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat …
    12  …ef: Pat<(int_hexagon_V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat …
    13  …at<(int_hexagon_V6_vasruhubsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat …
    14  …Pat<(int_hexagon_V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsa…
    15  …int_hexagon_V6_vasruhubrndsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsa…
    16  def: Pat<(int_hexagon_V6_vabsb HvxVR:$src1), (V6_vabsb HvxVR:$src1)>, Requires<[HasV65, UseHVX]>;
    17  def: Pat<(int_hexagon_V6_vabsb_128B HvxVR:$src1), (V6_vabsb HvxVR:$src1)>, Requires<[HasV65, UseHVX…
    18  def: Pat<(int_hexagon_V6_vabsb_sat HvxVR:$src1), (V6_vabsb_sat HvxVR:$src1)>, Requires<[HasV65, Use…
    [all …]
|
D | HexagonMapAsm2IntrinV62.gen.td |
    10  def: Pat<(IntID HvxVR:$src1, IntRegs:$src2),
    11          (MI HvxVR:$src1, IntRegs:$src2)>;
    12  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, IntRegs:$src2),
    13          (MI HvxVR:$src1, IntRegs:$src2)>;
    17  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
    18          (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    19  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
    21          (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    25  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2),
    26          (MI HvxVR:$src1, HvxVR:$src2)>;
    [all …]
|
D | HexagonIntrinsicsV60.td |
    15  def : Pat < (v16i32 (int_hexagon_V6_lo (v32i32 HvxWR:$src1))),
    16              (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_lo)) >;
    18  def : Pat < (v16i32 (int_hexagon_V6_hi (v32i32 HvxWR:$src1))),
    19              (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_hi)) >;
    21  def : Pat < (v32i32 (int_hexagon_V6_lo_128B (v64i32 HvxWR:$src1))),
    22              (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_lo)) >;
    24  def : Pat < (v32i32 (int_hexagon_V6_hi_128B (v64i32 HvxWR:$src1))),
    25              (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_hi)) >;
    28  def : Pat <(v64i1 (bitconvert (v16i32 HvxVR:$src1))),
    29              (v64i1 (V6_vandvrt(v16i32 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;
    [all …]
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonDepMapAsm2Intrin.td |
    14  def: Pat<(int_hexagon_S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
    15          (S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
    16  def: Pat<(int_hexagon_S2_vsatwh DoubleRegs:$src1),
    17          (S2_vsatwh DoubleRegs:$src1)>, Requires<[HasV5]>;
    18  def: Pat<(int_hexagon_M2_mpysu_up IntRegs:$src1, IntRegs:$src2),
    19          (M2_mpysu_up IntRegs:$src1, IntRegs:$src2)>, Requires<[HasV5]>;
    20  def: Pat<(int_hexagon_M2_mpyud_acc_ll_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
    21          (M2_mpyud_acc_ll_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
    22  def: Pat<(int_hexagon_M2_mpyud_acc_ll_s1 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
    23          (M2_mpyud_acc_ll_s1 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
    [all …]
|
D | HexagonMapAsm2IntrinV65.gen.td |
    9   …at<(int_hexagon_A6_vcmpbeq_notany DoubleRegs:$src1, DoubleRegs:$src2), (A6_vcmpbeq_notany DoubleRe…
    10  …ef: Pat<(int_hexagon_V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat …
    11  …at<(int_hexagon_V6_vasruwuhsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat …
    12  …ef: Pat<(int_hexagon_V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat …
    13  …at<(int_hexagon_V6_vasruhubsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat …
    14  …Pat<(int_hexagon_V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsa…
    15  …int_hexagon_V6_vasruhubrndsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsa…
    16  def: Pat<(int_hexagon_V6_vabsb HvxVR:$src1), (V6_vabsb HvxVR:$src1)>, Requires<[HasV65, UseHVX]>;
    17  def: Pat<(int_hexagon_V6_vabsb_128B HvxVR:$src1), (V6_vabsb HvxVR:$src1)>, Requires<[HasV65, UseHVX…
    18  def: Pat<(int_hexagon_V6_vabsb_sat HvxVR:$src1), (V6_vabsb_sat HvxVR:$src1)>, Requires<[HasV65, Use…
    [all …]
|
D | HexagonMapAsm2IntrinV62.gen.td |
    10  def: Pat<(IntID HvxVR:$src1, IntRegs:$src2),
    11          (MI HvxVR:$src1, IntRegs:$src2)>;
    12  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, IntRegs:$src2),
    13          (MI HvxVR:$src1, IntRegs:$src2)>;
    17  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
    18          (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    19  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
    21          (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    25  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2),
    26          (MI HvxVR:$src1, HvxVR:$src2)>;
    [all …]
|
D | HexagonIntrinsicsV60.td |
    15  def : Pat < (v16i32 (int_hexagon_V6_lo (v32i32 HvxWR:$src1))),
    16              (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_lo)) >;
    18  def : Pat < (v16i32 (int_hexagon_V6_hi (v32i32 HvxWR:$src1))),
    19              (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_hi)) >;
    21  def : Pat < (v32i32 (int_hexagon_V6_lo_128B (v64i32 HvxWR:$src1))),
    22              (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_lo)) >;
    24  def : Pat < (v32i32 (int_hexagon_V6_hi_128B (v64i32 HvxWR:$src1))),
    25              (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_hi)) >;
    28  def : Pat <(v512i1 (bitconvert (v16i32 HvxVR:$src1))),
    29              (v512i1 (V6_vandvrt(v16i32 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;
    [all …]
|
D | HexagonIntrinsics.td |
    127  def : Pat <(int_hexagon_C2_cmpgei I32:$src1, s32_0ImmPred_timm:$src2),
    128              (C2_tfrpr (C2_cmpgti I32:$src1, (SDEC1 s32_0ImmPred:$src2)))>;
    130  def : Pat <(int_hexagon_C2_cmpgeui I32:$src1, u32_0ImmPred_timm:$src2),
    131              (C2_tfrpr (C2_cmpgtui I32:$src1, (UDEC1 u32_0ImmPred:$src2)))>;
    135  def : Pat <(int_hexagon_C2_cmplt I32:$src1, I32:$src2),
    136              (C2_tfrpr (C2_cmpgt I32:$src2, I32:$src1))>;
    137  def : Pat <(int_hexagon_C2_cmpltu I32:$src1, I32:$src2),
    138              (C2_tfrpr (C2_cmpgtu I32:$src2, I32:$src1))>;
    145      : Pat <(IntID I32:$src1, I32:$src2, u4_0ImmPred_timm:$src3, u5_0ImmPred_timm:$src4),
    146              (OutputInst I32:$src1, I32:$src2, u4_0ImmPred:$src3,
    [all …]
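The C2_cmpgei / C2_cmplt hits above rewrite "greater-or-equal against an immediate" as "greater-than against the decremented immediate" (SDEC1/UDEC1), and "less-than" as a swapped "greater-than". A small C sketch of those scalar identities; the function names are made up for illustration:

#include <assert.h>
#include <stdint.h>

/* x >= imm  <=>  x > imm - 1; the decrement is done at wider precision here
 * so the sketch also holds at the edge of the 32-bit immediate range. */
static int cmpgei_sketch(int32_t x, int32_t imm) {
    return (int64_t)x > (int64_t)imm - 1;
}

/* x < y  <=>  y > x, the operand swap used by the C2_cmplt patterns. */
static int cmplt_sketch(int32_t x, int32_t y) {
    return y > x;
}

int main(void) {
    assert(cmpgei_sketch(5, 5) == 1);
    assert(cmpgei_sketch(4, 5) == 0);
    assert(cmplt_sketch(3, 7) == 1);
    return 0;
}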
|
/external/mesa3d/prebuilt-intermediates/nir/ |
D | nir_builder_opcodes.h |
    29  nir_amul(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)  in nir_amul() argument
    31      return nir_build_alu(build, nir_op_amul, src0, src1, NULL, NULL);  in nir_amul()
    34  nir_b16all_fequal16(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)  in nir_b16all_fequal16() argument
    36      return nir_build_alu(build, nir_op_b16all_fequal16, src0, src1, NULL, NULL);  in nir_b16all_fequal16()
    39  nir_b16all_fequal2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)  in nir_b16all_fequal2() argument
    41      return nir_build_alu(build, nir_op_b16all_fequal2, src0, src1, NULL, NULL);  in nir_b16all_fequal2()
    44  nir_b16all_fequal3(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)  in nir_b16all_fequal3() argument
    46      return nir_build_alu(build, nir_op_b16all_fequal3, src0, src1, NULL, NULL);  in nir_b16all_fequal3()
    49  nir_b16all_fequal4(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)  in nir_b16all_fequal4() argument
    51      return nir_build_alu(build, nir_op_b16all_fequal4, src0, src1, NULL, NULL);  in nir_b16all_fequal4()
    [all …]
|
D | nir_constant_expressions.c |
    578      const int1_t src1 = -(int1_t)_src[1][_i].b;  in evaluate_amul() local
    580      int1_t dst = src0 * src1;  in evaluate_amul()
    597      const int8_t src1 =  in evaluate_amul() local
    600      int8_t dst = src0 * src1;  in evaluate_amul()
    616      const int16_t src1 =  in evaluate_amul() local
    619      int16_t dst = src0 * src1;  in evaluate_amul()
    635      const int32_t src1 =  in evaluate_amul() local
    638      int32_t dst = src0 * src1;  in evaluate_amul()
    654      const int64_t src1 =  in evaluate_amul() local
    657      int64_t dst = src0 * src1;  in evaluate_amul()
    [all …]
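The evaluate_amul hits above are Mesa's generated constant folder multiplying the two sources at each supported bit width. A hypothetical standalone sketch of the same idea, not Mesa's generated code; the 1-bit case from line 578 is omitted:

#include <stdint.h>

/* Fold a constant amul: multiply and keep only the low bit_size bits,
 * i.e. wrap modulo 2^bit_size as the per-width branches in the excerpt do. */
static uint64_t fold_amul_sketch(uint64_t src0, uint64_t src1,
                                 unsigned bit_size) {
    uint64_t prod = src0 * src1;      /* wraps modulo 2^64 */
    switch (bit_size) {
    case 8:  return prod & 0xffu;
    case 16: return prod & 0xffffu;
    case 32: return prod & 0xffffffffu;
    case 64: return prod;
    default: return 0;                /* other widths omitted in this sketch */
    }
}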
|
/external/capstone/arch/X86/ |
D | X86MappingInsnOp.inc |
    40   { /* X86_ADC16ri, X86_INS_ADC: adc{w} $src1, $src2 */
    44   { /* X86_ADC16ri8, X86_INS_ADC: adc{w} $src1, $src2 */
    48   { /* X86_ADC16rm, X86_INS_ADC: adc{w} $src1, $src2 */
    52   { /* X86_ADC16rr, X86_INS_ADC: adc{w} $src1, $src2 */
    76   { /* X86_ADC32ri, X86_INS_ADC: adc{l} $src1, $src2 */
    80   { /* X86_ADC32ri8, X86_INS_ADC: adc{l} $src1, $src2 */
    84   { /* X86_ADC32rm, X86_INS_ADC: adc{l} $src1, $src2 */
    88   { /* X86_ADC32rr, X86_INS_ADC: adc{l} $src1, $src2 */
    112  { /* X86_ADC64ri32, X86_INS_ADC: adc{q} $src1, $src2 */
    116  { /* X86_ADC64ri8, X86_INS_ADC: adc{q} $src1, $src2 */
    [all …]
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonInstrInfoV60.td |
    60      : V6_LDInst <(outs VectorRegs:$dst), (ins IntRegs:$src1, s4_6Imm:$src2),
    65      : V6_LDInst <(outs VectorRegs128B:$dst), (ins IntRegs:$src1, s4_7Imm:$src2),
    69   def V6_vL32b_ai : T_vload_ai <"$dst = vmem($src1+#$src2)">,
    71   def V6_vL32b_nt_ai : T_vload_ai <"$dst = vmem($src1+#$src2):nt">,
    74   def V6_vL32b_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2)">,
    76   def V6_vL32b_nt_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2):nt">,
    81   def V6_vL32Ub_ai : T_vload_ai <"$dst = vmemu($src1+#$src2)">,
    83   def V6_vL32Ub_ai_128B : T_vload_ai_128B <"$dst = vmemu($src1+#$src2)">,
    89   def V6_vL32b_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2)">,
    91   def V6_vL32b_nt_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2):nt">,
    [all …]
|
D | HexagonInstrInfoV5.td |
    48           (sra (i64 (add (i64 (sra I64:$src1, u6ImmPred:$src2)), 1)),
    57      : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
    58              "$dst = asrrnd($src1, #$src2)">;
    82   def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1),
    83              "$dst = CONST64(#$src1)",
    84              [(set F64:$dst, fpimm:$src1)]>,
    88   def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
    89              "$dst = CONST32(#$src1)",
    90              [(set F32:$dst, fpimm:$src1)]>,
    102  def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32Ext:$src1),
    [all …]
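The truncated asrrnd pattern at line 48 above appears to be "shift right, add a rounding bit, shift once more". A guess at the scalar arithmetic, hedged because the tail of the pattern is cut off in the hit:

#include <stdint.h>

/* Apparent shape of asrrnd: arithmetic shift right by n, rounded to nearest.
 * Assumes ">>" on a negative signed value is an arithmetic shift (true on the
 * usual ABIs) and ignores the degenerate n == 0, x == INT64_MAX case. */
static int64_t asrrnd_sketch(int64_t x, unsigned n) {
    return ((x >> n) + 1) >> 1;
}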
|
D | HexagonIntrinsicsV60.td |
    29              (ins VecDblRegs:$src1),
    30              "$dst=vassignp_W($src1)",
    31              [(set VecDblRegs:$dst, (int_hexagon_V6_vassignp VecDblRegs:$src1))]>;
    35              (ins VecDblRegs128B:$src1),
    36              "$dst=vassignp_W_128B($src1)",
    38                            VecDblRegs128B:$src1))]>;
    42              (ins VecDblRegs:$src1),
    43              "$dst=lo_W($src1)",
    44              [(set VectorRegs:$dst, (int_hexagon_V6_lo VecDblRegs:$src1))]>;
    48              (ins VecDblRegs:$src1),
    [all …]
|
/external/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
    89          (ins VR128:$src1, VR128:$src2),
    90          !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    92          (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
    95          (ins VR128:$src1, i128mem:$src2),
    96          !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    98          (vt128 (OpNode (vt128 VR128:$src1),
    102         (ins i128mem:$src1, VR128:$src2),
    103         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    105         (vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))),
    128         (ins VR128:$src1, u8imm:$src2),
    [all …]
|
/external/pcre/dist2/src/sljit/ |
D | sljitNativePPC_32.c |
    45       sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)  in emit_single_op() argument
    52       SLJIT_ASSERT(src1 == TMP_REG1);  in emit_single_op()
    59       SLJIT_ASSERT(src1 == TMP_REG1);  in emit_single_op()
    74       SLJIT_ASSERT(src1 == TMP_REG1);  in emit_single_op()
    86       SLJIT_ASSERT(src1 == TMP_REG1);  in emit_single_op()
    90       SLJIT_ASSERT(src1 == TMP_REG1);  in emit_single_op()
    95       SLJIT_ASSERT(src1 == TMP_REG1);  in emit_single_op()
    101      …return push_inst(compiler, ADD | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2…  in emit_single_op()
    109      return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm);  in emit_single_op()
    112      …FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compi…  in emit_single_op()
    [all …]
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
    97          (ins VR128:$src1, VR128:$src2),
    98          !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    100         (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
    103         (ins VR128:$src1, i128mem:$src2),
    104         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    106         (vt128 (OpNode (vt128 VR128:$src1),
    110         (ins i128mem:$src1, VR128:$src2),
    111         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    113         (vt128 (OpNode (vt128 (load addr:$src1)),
    119         (ins VR128:$src1, VR128:$src2),
    [all …]
|
/external/llvm-project/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
    97          (ins VR128:$src1, VR128:$src2),
    98          !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    100         (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
    103         (ins VR128:$src1, i128mem:$src2),
    104         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    106         (vt128 (OpNode (vt128 VR128:$src1),
    110         (ins i128mem:$src1, VR128:$src2),
    111         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    113         (vt128 (OpNode (vt128 (load addr:$src1)),
    119         (ins VR128:$src1, VR128:$src2),
    [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | int-cmp-38.ll |
    10  define i32 @f1(i32 %src1) {
    17    %cond = icmp slt i32 %src1, %src2
    20    %mul = mul i32 %src1, %src1
    23    %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
    28  define i32 @f2(i32 %src1) {
    35    %cond = icmp ult i32 %src1, %src2
    38    %mul = mul i32 %src1, %src1
    41    %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
    46  define i32 @f3(i32 %src1) {
    53    %cond = icmp eq i32 %src1, %src2
    [all …]
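The @f1 hit above squares %src1 only on one side of the comparison and merges the results with a phi. A C-level rendering of that control flow; where %src2 comes from and which way the branch goes are elided in the excerpt, so both are assumptions here:

#include <stdint.h>

static int32_t f1_sketch(int32_t src1, int32_t src2) {
    int32_t res = src1;                      /* value reaching the phi from %entry */
    if (src1 < src2)                         /* %cond = icmp slt i32 %src1, %src2 */
        res = (int32_t)((uint32_t)src1 * (uint32_t)src1);  /* %mul, wrapping like LLVM's mul */
    return res;                              /* %res = phi i32 [ %src1 ], [ %mul ] */
}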
|
/external/llvm-project/llvm/test/CodeGen/SystemZ/ |
D | int-cmp-38.ll |
    10  define i32 @f1(i32 %src1) {
    17    %cond = icmp slt i32 %src1, %src2
    20    %mul = mul i32 %src1, %src1
    23    %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
    28  define i32 @f2(i32 %src1) {
    35    %cond = icmp ult i32 %src1, %src2
    38    %mul = mul i32 %src1, %src1
    41    %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
    46  define i32 @f3(i32 %src1) {
    53    %cond = icmp eq i32 %src1, %src2
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | sum_squares_msa.c |
    22    uint64_t src0, src1, src2, src3;  in vpx_sum_squares_2d_i16_msa() local
    26    LD4(src, src_stride, src0, src1, src2, src3);  in vpx_sum_squares_2d_i16_msa()
    27    INSERT_D2_SH(src0, src1, diff0);  in vpx_sum_squares_2d_i16_msa()
    35    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_sum_squares_2d_i16_msa() local
    37    LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
    38    DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
    47    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_sum_squares_2d_i16_msa() local
    49    LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
    50    DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
    54    LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
    [all …]
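The MSA routine excerpted above accumulates squared 16-bit samples with vector dot products. A plain scalar reference for the same computation; the block size and stride handling here are assumptions for illustration, not libvpx's code:

#include <stdint.h>

static uint64_t sum_squares_2d_i16_sketch(const int16_t *src, int stride,
                                          int size) {
    uint64_t ss = 0;
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            int32_t v = src[r * stride + c];
            ss += (uint64_t)(v * v);         /* each square fits in 31 bits */
        }
    }
    return ss;
}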
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | orn2.ll |
    5   define amdgpu_ps i32 @s_orn2_i32(i32 inreg %src0, i32 inreg %src1) {
    10    %not.src1 = xor i32 %src1, -1
    11    %or = or i32 %src0, %not.src1
    15  define amdgpu_ps i32 @s_orn2_i32_commute(i32 inreg %src0, i32 inreg %src1) {
    20    %not.src1 = xor i32 %src1, -1
    21    %or = or i32 %not.src1, %src0
    25  define amdgpu_ps { i32, i32 } @s_orn2_i32_multi_use(i32 inreg %src0, i32 inreg %src1) {
    31    %not.src1 = xor i32 %src1, -1
    32    %or = or i32 %src0, %not.src1
    34    %insert.1 = insertvalue { i32, i32 } %insert.0, i32 %not.src1, 1
    [all …]
|
D | andn2.ll |
    5   define amdgpu_ps i32 @s_andn2_i32(i32 inreg %src0, i32 inreg %src1) {
    10    %not.src1 = xor i32 %src1, -1
    11    %and = and i32 %src0, %not.src1
    15  define amdgpu_ps i32 @s_andn2_i32_commute(i32 inreg %src0, i32 inreg %src1) {
    20    %not.src1 = xor i32 %src1, -1
    21    %and = and i32 %not.src1, %src0
    25  define amdgpu_ps { i32, i32 } @s_andn2_i32_multi_use(i32 inreg %src0, i32 inreg %src1) {
    31    %not.src1 = xor i32 %src1, -1
    32    %and = and i32 %src0, %not.src1
    34    %insert.1 = insertvalue { i32, i32 } %insert.0, i32 %not.src1, 1
    [all …]
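Both GlobalISel tests above (orn2.ll and andn2.ll) exercise an OR/AND whose second operand is inverted with xor -1, which the AMDGPU backend is expected to fold into its orn2/andn2 forms. The scalar patterns, as a small C sketch with hypothetical names:

#include <stdint.h>

static uint32_t orn2_sketch(uint32_t src0, uint32_t src1) {
    return src0 | ~src1;     /* or  %src0, (xor %src1, -1) */
}

static uint32_t andn2_sketch(uint32_t src0, uint32_t src1) {
    return src0 & ~src1;     /* and %src0, (xor %src1, -1) */
}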
|