/external/libvpx/libvpx/vpx_dsp/mips/ |
D | sum_squares_msa.c |
      18   v4i32 mul0, mul1;  in vpx_sum_squares_2d_i16_msa() local
      29   DOTP_SH2_SW(diff0, diff1, diff0, diff1, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
      30   mul0 += mul1;  in vpx_sum_squares_2d_i16_msa()
      31   res0 = __msa_hadd_s_d(mul0, mul0);  in vpx_sum_squares_2d_i16_msa()
      38   DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
      39   DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
      40   DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
      41   DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
      42   mul0 += mul1;  in vpx_sum_squares_2d_i16_msa()
      43   res0 = __msa_hadd_s_d(mul0, mul0);  in vpx_sum_squares_2d_i16_msa()
      [all …]
|
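The hits above are the MSA (MIPS SIMD) block sum-of-squares kernel: DOTP_SH2_SW starts two dot-product accumulators and DPADD_SH2_SW keeps multiply-accumulating into them, with a final horizontal add. A minimal scalar sketch of what the routine computes, modeled on libvpx's C reference path (parameter names assumed from the vpx_sum_squares_2d_i16 signature):

    #include <stdint.h>

    /* Scalar sketch: sum of squared int16 samples over a size x size
     * block with row stride `stride`. The MSA version reaches the same
     * result with the DOTP/DPADD accumulators (mul0/mul1) folded by a
     * final horizontal add (__msa_hadd_s_d). */
    static uint64_t sum_squares_2d_i16(const int16_t *src, int stride,
                                       int size) {
      uint64_t total = 0;
      for (int r = 0; r < size; ++r)
        for (int c = 0; c < size; ++c)
          total += (int32_t)src[r * stride + c] * src[r * stride + c];
      return total;
    }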
D | deblock_msa.c |
      551  v4i32 mul0, mul1, mul2, mul3;  in vpx_mbpost_proc_across_ip_msa() local
      581  MUL4(sum0_w, sub0, sum1_w, sub1, sum2_w, sub2, sum3_w, sub3, mul0, mul1,  in vpx_mbpost_proc_across_ip_msa()
      583  sum_sq0[0] = sum_sq + mul0[0];  in vpx_mbpost_proc_across_ip_msa()
      585  sum_sq0[cnt + 1] = sum_sq0[cnt] + mul0[cnt + 1];  in vpx_mbpost_proc_across_ip_msa()
      656  v4i32 mul0 = { 0 };  in vpx_mbpost_proc_down_msa() local
      681  mul0 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult0);  in vpx_mbpost_proc_down_msa()
      707  mul0 += add0 * sub0;  in vpx_mbpost_proc_down_msa()
      719  total0 = mul0 * __msa_ldi_w(15);  in vpx_mbpost_proc_down_msa()
|
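In both post-proc routines, mul0 holds per-lane sums of squares for a sliding window over the image. Judging from the operand names, the update at line 707 (mul0 += add0 * sub0) is the difference-of-squares trick: when oldv leaves the window and newv enters, the sum of squares changes by newv² − oldv² = (newv + oldv)(newv − oldv). A scalar sketch with illustrative names:

    /* Sliding-window sum / sum-of-squares bookkeeping, scalar form:
     * update both running totals in O(1) per step. The product
     * (newv + oldv) * (newv - oldv) plays the role of the add0 * sub0
     * term accumulated into mul0 above. */
    static void slide_window(int newv, int oldv, int *sum, int *sum_sq) {
      *sum += newv - oldv;
      *sum_sq += (newv + oldv) * (newv - oldv);  /* newv^2 - oldv^2 */
    }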
/external/vulkan-validation-layers/libs/glm/detail/ |
D | intrinsic_geometric.inl |
      54   __m128 mul0 = _mm_mul_ps(v1, v2);
      55   __m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
      56   __m128 add0 = _mm_add_ps(mul0, swp0);
      66   __m128 mul0 = _mm_mul_ps(v1, v2);  local
      67   __m128 mov0 = _mm_movehl_ps(mul0, mul0);
      68   __m128 add0 = _mm_add_ps(mov0, mul0);
      81   __m128 mul0 = _mm_mul_ps(swp0, swp3);  local
      83   __m128 sub0 = _mm_sub_ps(mul0, mul1);
      92   __m128 mul0 = _mm_mul_ps(v, isr0);  local
      93   return mul0;
      [all …]
|
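Lines 54-56 are the first half of glm's SSE dot product: multiply lanewise, then fold the four partial products with shuffle-and-add steps (the movehl variant at 66-68 does the same fold for a vec3). Rounded out into a self-contained helper, the pattern looks roughly like:

    #include <xmmintrin.h>

    /* Horizontal dot product in plain SSE1, following the snippet's
     * mul/shuffle/add pattern; the full sum ends up in all four lanes. */
    static __m128 dot4_ps(__m128 v1, __m128 v2) {
      __m128 mul0 = _mm_mul_ps(v1, v2);
      __m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
      __m128 add0 = _mm_add_ps(mul0, swp0);   /* pairwise sums       */
      __m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
      return _mm_add_ps(add0, swp1);          /* total in every lane */
    }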
D | intrinsic_common.inl |
      230  __m128 mul0 = _mm_mul_ps(y, flr0);  local
      231  __m128 sub0 = _mm_sub_ps(x, mul0);
      258  __m128 mul0 = _mm_mul_ps(v1, sub0);  local
      260  __m128 add0 = _mm_add_ps(mul0, mul1);
      279  __m128 mul0 = _mm_mul_ps(glm::detail::two, clp0);  local
      280  __m128 sub2 = _mm_sub_ps(glm::detail::three, mul0);
|
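These three hits are the multiply step of three scalar identities: mod as x − y·floor(x/y) (230-231), mix/lerp as v1·(1−a) + v2·a (258-260, with the surrounding operands inferred from the glm sources, so treat that form as an assumption), and smoothstep's 3 − 2t factor (279-280). Scalar equivalents for reference:

    #include <math.h>

    static float mod_glsl(float x, float y) {            /* 230-231 */
      return x - y * floorf(x / y);
    }

    static float mix_glsl(float v1, float v2, float a) { /* 258-260 */
      return v1 * (1.0f - a) + v2 * a;
    }

    static float smooth_poly(float t) {  /* 279-280: t*t*(3 - 2t) */
      return t * t * (3.0f - 2.0f * t);
    }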
/external/llvm/test/Transforms/StraightLineStrengthReduce/ |
D | slsr-mul.ll |
      8    %mul0 = mul i32 %b, %s
      11   call void @foo(i32 %mul0)
      29   %mul0 = mul i32 %b, %s
      32   call void @foo(i32 %mul0)
      51   %mul0 = mul i32 %b, %s
      53   call void @foo(i32 %mul0)
      78   %mul0 = mul i32 %a, %b
      85   call void @foo(i32 %mul0)
      99   ; mul0 = b * s;
      101  ; mul1 = mul0 + bump; // GVN ensures mul1 and mul2 use the same bump.
      [all …]
|
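This test exercises LLVM's StraightLineStrengthReduce pass: when several products share a factor and their other operands differ by a constant stride, the later products are rewritten as the earlier one plus a bump (the comment at lines 99-101). An illustrative C analogue of the before/after shapes, not the IR from the test itself:

    extern void foo(int);

    static void before(int b, int s) {
      foo(b * s);        /* mul0                        */
      foo((b + 1) * s);  /* candidate for mul0 + s      */
      foo((b + 2) * s);  /* candidate for the next bump */
    }

    static void after(int b, int s) {
      int mul0 = b * s;     /* base product, computed once    */
      int mul1 = mul0 + s;  /* strength-reduced: add, not mul */
      int mul2 = mul1 + s;
      foo(mul0); foo(mul1); foo(mul2);
    }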
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | idct_msa.c |
      180  v8i16 input0, input1, dequant_in0, dequant_in1, mul0, mul1;  in dequant_idct4x4_addblk_msa() local
      188  MUL2(input0, dequant_in0, input1, dequant_in1, mul0, mul1);  in dequant_idct4x4_addblk_msa()
      189  PCKEV_D2_SH(zero, mul0, zero, mul1, in0, in2);  in dequant_idct4x4_addblk_msa()
      190  PCKOD_D2_SH(zero, mul0, zero, mul1, in1, in3);  in dequant_idct4x4_addblk_msa()
      192  PCKEV_D2_SH(hz1_h, hz0_h, hz3_h, hz2_h, mul0, mul1);  in dequant_idct4x4_addblk_msa()
      193  UNPCK_SH_SW(mul0, hz0_w, hz1_w);  in dequant_idct4x4_addblk_msa()
      218  v8i16 in0, in1, in2, in3, mul0, mul1, mul2, mul3, dequant_in0, dequant_in1;  in dequant_idct4x4_addblk_2x_msa() local
      227  mul0, mul1, mul2, mul3);  in dequant_idct4x4_addblk_2x_msa()
      228  PCKEV_D2_SH(mul2, mul0, mul3, mul1, in0, in2);  in dequant_idct4x4_addblk_2x_msa()
      229  PCKOD_D2_SH(mul2, mul0, mul3, mul1, in1, in3);  in dequant_idct4x4_addblk_2x_msa()
|
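The MUL2/MUL4 calls here perform VP8 dequantization: an elementwise multiply of the quantized coefficients by the per-position dequant factors before the 4x4 inverse transform (the PCKEV/PCKOD pairs then repack the products into transform order). The scalar view, as in the C reference code, is simply:

    #include <stdint.h>

    /* Scalar sketch of the dequant step: 16 coefficients of a 4x4
     * block, each scaled by its dequantization factor. */
    static void dequant_4x4(const int16_t *input, const int16_t *dq,
                            int16_t *out) {
      for (int i = 0; i < 16; ++i) out[i] = input[i] * dq[i];
    }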
/external/boringssl/src/crypto/fipsmodule/bn/asm/ |
D | x86-mont.pl |
      138  $mul0="mm4";
      157  &movd ($mul0,&DWP(0,$bp));  # bp[0]
      161  &pmuludq($mul1,$mul0);  # ap[0]*bp[0]
      179  &pmuludq($acc0,$mul0);  # ap[j]*bp[0]
      197  &pmuludq($acc0,$mul0);  # ap[num-1]*bp[0]
      217  &movd ($mul0,&DWP(0,$bp,$i,4));  # bp[i]
      221  &pmuludq($mul1,$mul0);  # ap[0]*bp[i]
      244  &pmuludq($acc0,$mul0);  # ap[j]*bp[i]
      265  &pmuludq($acc0,$mul0);  # ap[num-1]*bp[i]
|
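In this Montgomery-multiplication loop, $mul0 (mm4) holds the broadcast word bp[i], and each pmuludq computes one 32x32→64 product ap[j]*bp[i] that is accumulated with carries into the running result. A hedged C sketch of just that multiply-accumulate row; the Montgomery reduction step with n0 is omitted and the names are illustrative:

    #include <stdint.h>

    /* Add ap[0..num-1] * b into tp[0..num], one 32x32->64 multiply per
     * word -- the role of pmuludq($acc0, $mul0) in the loop above. */
    static void mul_add_row(uint32_t *tp, const uint32_t *ap, uint32_t b,
                            int num) {
      uint64_t carry = 0;
      for (int j = 0; j < num; ++j) {
        uint64_t t = (uint64_t)ap[j] * b + tp[j] + carry;
        tp[j] = (uint32_t)t;
        carry = t >> 32;
      }
      tp[num] += (uint32_t)carry;  /* top-word handling simplified */
    }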
/external/vulkan-validation-layers/libs/glm/gtx/ |
D | simd_quat.inl |
      123  __m128 mul0 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(0, 1, 2, 3)));  local
      129  __m128 add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff);
      134  mul0 = _mm_mul_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f));
      135  __m128 add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul0, mul0));
|
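Both code paths compute one component of the Hamilton quaternion product: multiply q1 against a permutation of q2, flip the signs of selected lanes, and horizontally sum (via _mm_dp_ps with a sign vector on SSE4, or mul + movehl/add otherwise). The scalar product those signed dot products assemble, assuming (x, y, z, w) component order:

    typedef struct { float x, y, z, w; } quat;

    /* Scalar Hamilton product; each component is a signed dot product
     * of q1 with a permutation of q2, which is what the shuffle + sign
     * flip + horizontal add above evaluates one lane at a time. */
    static quat quat_mul(quat a, quat b) {
      quat r;
      r.w = a.w * b.w - a.x * b.x - a.y * b.y - a.z * b.z;
      r.x = a.w * b.x + a.x * b.w + a.y * b.z - a.z * b.y;
      r.y = a.w * b.y + a.y * b.w + a.z * b.x - a.x * b.z;
      r.z = a.w * b.z + a.z * b.w + a.x * b.y - a.y * b.x;
      return r;
    }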
D | simd_vec4.inl |
      650  __m128 mul0 = _mm_mul_ps(x.Data, isr0);  local
      651  return mul0;
      661  __m128 mul0 = _mm_mul_ps(x.Data, isr0);  local
      662  return mul0;
|
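Both hits are the tail of glm's fast normalize: isr0 is an rsqrtps estimate of 1/sqrt(dot(x, x)), and mul0 scales the vector by it. A sketch reusing the dot4_ps helper from the intrinsic_geometric.inl snippet above; rsqrtps is only a ~12-bit approximation, which is the "fast" trade-off:

    #include <xmmintrin.h>

    static __m128 fast_normalize_ps(__m128 v) {
      __m128 dot0 = dot4_ps(v, v);       /* |v|^2 in all four lanes */
      __m128 isr0 = _mm_rsqrt_ps(dot0);  /* estimate of 1/|v|       */
      return _mm_mul_ps(v, isr0);        /* the mul0 above          */
    }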
/external/llvm/test/CodeGen/SystemZ/ |
D | fp-mul-01.ll |
      106  %mul0 = fmul float %ret, %val0
      107  %mul1 = fmul float %mul0, %val1
|
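This test, like the fp-mul and int-mul SystemZ tests below, builds a long chain of dependent multiplies that folds many loaded values into one accumulator, so the backend must keep the running value live across every step. The tests are fully unrolled; a loop shows the shape more compactly:

    /* The dependence chain shared by fp-mul-01/03 and int-mul-02/03/04:
     * ret = ((ret * val0) * val1) * ... makes each multiply wait on the
     * previous one and keeps %ret live throughout. */
    static float mul_chain(float ret, const float *val, int n) {
      for (int i = 0; i < n; ++i) ret = ret * val[i];
      return ret;
    }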
D | fp-mul-03.ll |
      108  %mul0 = fmul double %ret, %val0
      109  %mul1 = fmul double %mul0, %val1
|
D | fp-move-02.ll |
      281  %mul0 = fmul double %conv0, %factor
      282  store volatile double %mul0, double *@dptr
      330  %double0 = phi double [ 1.0, %entry ], [ %mul0, %loop ]
      342  %mul0 = fmul double %double0, %factor
      359  %conv0 = bitcast double %mul0 to i64
|
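fp-move-02.ll stresses moves between floating-point and general registers: the bitcast at line 359 reinterprets the double's bit pattern as an i64 (an LGDR-style register move on z), and the volatile stores keep each %mul0 from being optimized away. The portable C spelling of that bitcast:

    #include <stdint.h>
    #include <string.h>

    /* Bit-pattern move from double to integer, the C analogue of
     * `bitcast double %mul0 to i64`; memcpy type-puns without UB. */
    static uint64_t double_bits(double d) {
      uint64_t u;
      memcpy(&u, &d, sizeof u);
      return u;
    }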
D | int-mul-04.ll |
      127  %mul0 = mul i64 %ret, %val0
      128  %mul1 = mul i64 %mul0, %val1
|
D | int-mul-02.ll |
      162  %mul0 = mul i32 %ret, %val0
      163  %mul1 = mul i32 %mul0, %val1
|
D | int-mul-03.ll |
      168  %mul0 = mul i64 %ret, %ext0
      169  %mul1 = mul i64 %mul0, %ext1
|
D | fp-mul-02.ll |
      144  %mul0 = fmul double %accext0, %ext0
      145  %extra0 = fmul double %mul0, 1.01
|
D | fp-mul-04.ll |
      164  %mul0 = fmul fp128 %accext0, %ext0
      166  %extra0 = fmul fp128 %mul0, %const0
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | madmk.ll |
      52   %mul0 = fmul float %a, 10.0
      54   %madmk0 = fadd float %mul0, %b
|
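This pair of lines is the pattern AMDGPU folds into v_madmk_f32: a multiply by a 32-bit literal feeding an add with a register operand, i.e. dst = a * K + b with K encoded as an inline literal. In C terms:

    /* Shape matched by the madmk test: one multiply-by-constant plus a
     * register addend, foldable into a single v_madmk_f32. */
    static float madmk_shape(float a, float b) { return a * 10.0f + b; }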
D | madak.ll |
      54   %mul0 = fmul float %a, %b
      56   %madak0 = fadd float %mul0, 10.0
|
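madak is the sibling encoding: the literal is the addend rather than the multiplier, with v_madak_f32 computing dst = a * b + K:

    /* Shape matched by the madak test: register-by-register multiply
     * plus a literal addend, foldable into a single v_madak_f32. */
    static float madak_shape(float a, float b) { return a * b + 10.0f; }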
/external/webp/src/enc/ |
D | backward_references_enc.c |
      658  const double mul0 = 0.68;  in AddSingleLiteralWithCostModel() local
      659  cost_val += GetCacheCost(cost_model, ix) * mul0;  in AddSingleLiteralWithCostModel()
|
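The final hit is WebP's literal cost model: the cache-cost term is scaled by an empirically tuned weight (mul0 = 0.68 in the source) before being added to the running entropy estimate. The shape of the update, with illustrative names:

    /* Weighted cost update as in AddSingleLiteralWithCostModel: the
     * 0.68 factor is the empirical weight from the source above. */
    static double add_cache_cost(double cost_val, double cache_cost) {
      const double mul0 = 0.68;
      return cost_val + cache_cost * mul0;
    }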