/external/libvpx/libvpx/third_party/libyuv/source/
compare_win.cc
     46  paddd xmm0, xmm1                     in SumSquareError_SSE2()
     47  paddd xmm0, xmm2                     in SumSquareError_SSE2()
     52  paddd xmm0, xmm1                     in SumSquareError_SSE2()
     54  paddd xmm0, xmm1                     in SumSquareError_SSE2()
    170  paddd xmm3, xmm4  // add 16 results  in HashDjb2_SSE41()
    171  paddd xmm1, xmm2                     in HashDjb2_SSE41()
    172  paddd xmm1, xmm3                     in HashDjb2_SSE41()
    175  paddd xmm1, xmm2                     in HashDjb2_SSE41()
    177  paddd xmm1, xmm2                     in HashDjb2_SSE41()
    178  paddd xmm0, xmm1                     in HashDjb2_SSE41()
    [all …]
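The SumSquareError_SSE2 hits above are the accumulate steps of a sum-of-squared-differences loop: absolute byte differences are widened, squared with pmaddwd, and folded into a running vector sum with paddd. A minimal C-intrinsics sketch of that pattern (not libyuv's exact code; _mm_add_epi32 is the intrinsic that compiles to paddd):

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    /* Sketch of the SumSquareError accumulation: per 16-byte chunk,
       |a-b| via saturating subtracts both ways, widen to u16, square
       with pmaddwd, accumulate with paddd (_mm_add_epi32). */
    static uint32_t sum_square_error_sse2(const uint8_t *a, const uint8_t *b,
                                          int count) {
      __m128i sum = _mm_setzero_si128();
      const __m128i zero = _mm_setzero_si128();
      for (int i = 0; i < count; i += 16) {
        __m128i va = _mm_loadu_si128((const __m128i *)(a + i));
        __m128i vb = _mm_loadu_si128((const __m128i *)(b + i));
        __m128i diff = _mm_or_si128(_mm_subs_epu8(va, vb),
                                    _mm_subs_epu8(vb, va));
        __m128i lo = _mm_unpacklo_epi8(diff, zero);  /* low 8 diffs as u16 */
        __m128i hi = _mm_unpackhi_epi8(diff, zero);  /* high 8 diffs as u16 */
        sum = _mm_add_epi32(sum, _mm_madd_epi16(lo, lo));  /* paddd */
        sum = _mm_add_epi32(sum, _mm_madd_epi16(hi, hi));  /* paddd */
      }
      /* Horizontal reduce with two pshufd+paddd steps, as at lines 52/54. */
      sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x0E));
      sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x01));
      return (uint32_t)_mm_cvtsi128_si32(sum);
    }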
/external/libvpx/libvpx/vp8/encoder/x86/ |
encodeopt.asm
     39  paddd xmm0, xmm2
     47  paddd xmm0, xmm1
     51  paddd xmm0, xmm1
     97  paddd mm1, mm5
    110  paddd mm3, mm5
    112  paddd mm1, mm3
    116  paddd mm0, mm1
    166  paddd mm2, mm5
    168  paddd mm2, mm3
    181  paddd mm2, mm5
    [all …]
fwalsh_sse2.asm
     97  paddd xmm0, xmm4            ; b21 b20 a21 a20
    100  paddd xmm1, xmm6            ; b23 b22 a23 a22
    117  paddd xmm0, xmm4
    118  paddd xmm2, xmm5
    119  paddd xmm0, [GLOBAL(cd3)]
    120  paddd xmm2, [GLOBAL(cd3)]
    121  paddd xmm1, xmm6
    122  paddd xmm3, xmm7
    123  paddd xmm1, [GLOBAL(cd3)]
    124  paddd xmm3, [GLOBAL(cd3)]
dct_sse2.asm
    100  paddd xmm3, XMMWORD PTR[GLOBAL(_14500)]
    101  paddd xmm4, XMMWORD PTR[GLOBAL(_7500)]
    140  paddd xmm0, xmm5
    141  paddd xmm1, xmm5
    151  paddd xmm3, XMMWORD PTR[GLOBAL(_12000)]
    152  paddd xmm4, XMMWORD PTR[GLOBAL(_51000)]
    252  paddd xmm1, XMMWORD PTR[GLOBAL(_14500)]
    253  paddd xmm4, XMMWORD PTR[GLOBAL(_14500)]
    254  paddd xmm3, XMMWORD PTR[GLOBAL(_7500)]
    255  paddd xmm5, XMMWORD PTR[GLOBAL(_7500)]
    [all …]
dct_mmx.asm
    100  paddd mm1, MMWORD PTR[GLOBAL(_14500)]
    101  paddd mm4, MMWORD PTR[GLOBAL(_14500)]
    102  paddd mm3, MMWORD PTR[GLOBAL(_7500)]
    103  paddd mm5, MMWORD PTR[GLOBAL(_7500)]
    185  paddd mm1, MMWORD PTR[GLOBAL(_12000)]
    186  paddd mm4, MMWORD PTR[GLOBAL(_12000)]
    187  paddd mm3, MMWORD PTR[GLOBAL(_51000)]
    188  paddd mm5, MMWORD PTR[GLOBAL(_51000)]
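The _14500/_7500 and _12000/_51000 operands in dct_sse2.asm and dct_mmx.asm above are rounding biases added before the arithmetic shifts of VP8's two forward-DCT passes. A hedged scalar sketch of the pattern, following my reading of the C reference in vp8/encoder/dct.c (function shapes and names here are illustrative; c1/d1 are butterfly outputs):

    /* Pass 1: 12-bit fixed-point rotation, bias before >>12. */
    static void fdct_rotate_rows(int c1, int d1, short *op1, short *op3) {
      *op1 = (short)((c1 * 2217 + d1 * 5352 + 14500) >> 12);
      *op3 = (short)((d1 * 2217 - c1 * 5352 + 7500) >> 12);
    }

    /* Pass 2: 16-bit shift with its own biases, plus a DC-sign tweak. */
    static void fdct_rotate_cols(int c1, int d1, short *op4, short *op12) {
      *op4  = (short)(((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0));
      *op12 = (short)((d1 * 2217 - c1 * 5352 + 51000) >> 16);
    }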
quantize_mmx.asm
    222  paddd mm5, mm1
    243  paddd mm5, mm0
    245  paddd mm5, mm1
    249  paddd mm0, mm5
/external/libvpx/libvpx/vpx_dsp/x86/ |
sad_sse2.asm
     69  paddd m1, m2
     70  paddd m3, m4
     72  paddd m0, m1
     74  paddd m0, m3
     79  paddd m0, m1
    112  paddd m1, m2
    113  paddd m3, m4
    115  paddd m0, m1
    117  paddd m0, m3
    122  paddd m0, m1
    [all …]
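In the SAD kernels, psadbw already produces per-half partial sums; the paddd hits above just fold those partials into one 32-bit total. A minimal intrinsics sketch of the same idea (not the x86inc macro code itself):

    #include <emmintrin.h>
    #include <stdint.h>

    /* 16-wide SAD row loop: _mm_sad_epu8 (psadbw) yields two 64-bit
       lanes of partial sums; _mm_add_epi32 (paddd) accumulates them. */
    static uint32_t sad16(const uint8_t *src, int src_stride,
                          const uint8_t *ref, int ref_stride, int rows) {
      __m128i acc = _mm_setzero_si128();
      for (int r = 0; r < rows; ++r) {
        __m128i s = _mm_loadu_si128((const __m128i *)src);
        __m128i p = _mm_loadu_si128((const __m128i *)ref);
        acc = _mm_add_epi32(acc, _mm_sad_epu8(s, p));  /* paddd */
        src += src_stride;
        ref += ref_stride;
      }
      /* Fold the upper 64-bit lane into the lower one, as the asm does. */
      acc = _mm_add_epi32(acc, _mm_shuffle_epi32(acc, 0x0E));
      return (uint32_t)_mm_cvtsi128_si32(acc);
    }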
highbd_variance_impl_sse2.asm
     91  paddd xmm6, xmm1
     98  paddd xmm6, xmm3
    105  paddd xmm6, xmm1
    110  paddd xmm6, xmm3
    121  paddd xmm7, xmm5
    122  paddd xmm7, xmm2
    135  paddd xmm6, xmm4
    139  paddd xmm7, xmm5
    147  paddd xmm6, xmm4
    148  paddd xmm7, xmm5
    [all …]
sad4d_sse2.asm
     50  paddd m6, m1
     51  paddd m7, m3
     90  paddd m4, m1
     93  paddd m5, m2
     94  paddd m6, m3
     96  paddd m7, m1
    127  paddd m4, m1
    129  paddd m5, m2
    130  paddd m6, m3
    132  paddd m7, m1
    [all …]
highbd_sad4d_sse2.asm
     57  paddd m4, m2
     66  paddd m5, m2
     75  paddd m6, m2
     84  paddd m7, m2
    132  paddd m4, m2
    139  paddd m5, m2
    146  paddd m6, m2
    152  paddd m7, m2
    164  paddd m4, m2
    171  paddd m5, m2
    [all …]
highbd_sad_sse2.asm
     98  paddd m0, m1
     99  paddd m0, m3
    137  paddd m0, m1
    139  paddd m0, m3
    145  paddd m0, m1
    148  paddd m0, m1
    205  paddd m0, m1
    207  paddd m0, m3
    212  paddd m0, m1
    215  paddd m0, m1
    [all …]
variance_impl_mmx.asm
     42  paddd mm4, mm0
     43  paddd mm4, mm1
     44  paddd mm4, mm2
     45  paddd mm4, mm3
    116  paddd mm7, mm0              ; accumulate in mm7
    117  paddd mm7, mm2              ; accumulate in mm7
    139  paddd mm7, mm0              ; accumulate in mm7
    140  paddd mm7, mm2              ; accumulate in mm7
    162  paddd mm7, mm0              ; accumulate in mm7
    163  paddd mm7, mm2              ; accumulate in mm7
    [all …]
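The "accumulate in mm7" comments are the classic variance recipe: keep a running sum and a running sum of squared differences, then compute variance = SSE - sum^2/N at the end. A scalar model of what those lanes compute:

    #include <stdint.h>

    /* Scalar model of the variance kernels: the paddd instructions hold
       the running sum and sum-of-squares; the final combine is
       variance = sse - sum*sum / (w*h). */
    static uint32_t variance(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             int w, int h) {
      int64_t sum = 0, sse = 0;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          int d = src[x] - ref[x];
          sum += d;
          sse += d * d;
        }
        src += src_stride;
        ref += ref_stride;
      }
      return (uint32_t)(sse - ((sum * sum) / (w * h)));
    }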
ssim_opt_x86_64.asm
     19  paddd xmm13, xmm1           ; sum_sq_s
     22  paddd xmm12, xmm2           ; sum_sq_r
     24  paddd xmm11, xmm3           ; sum_sxr
     44  paddd %1, xmm1
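Here the xmm11-xmm13 registers hold the statistics named in the comments: SSIM needs five sums per window. A scalar sketch of the accumulation (function shape mine, not the asm's interface):

    #include <stdint.h>

    /* Scalar model of the ssim_opt accumulators over one window. */
    static void ssim_sums(const uint8_t *s, const uint8_t *r, int n,
                          uint32_t *sum_s, uint32_t *sum_r,
                          uint32_t *sum_sq_s,   /* xmm13 */
                          uint32_t *sum_sq_r,   /* xmm12 */
                          uint32_t *sum_sxr) {  /* xmm11 */
      for (int i = 0; i < n; ++i) {
        *sum_s += s[i];
        *sum_r += r[i];
        *sum_sq_s += s[i] * s[i];
        *sum_sq_r += r[i] * r[i];
        *sum_sxr += s[i] * r[i];
      }
    }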
vpx_high_subpixel_bilinear_sse2.asm
     48  paddd xmm0, xmm3            ;rounding
    106  paddd xmm6, xmm4            ;rounding
    107  paddd xmm0, xmm4            ;rounding
    140  paddd xmm9, xmm4            ;rounding
    141  paddd xmm6, xmm4
    142  paddd xmm0, xmm4
    143  paddd xmm2, xmm4
vpx_high_subpixel_8t_sse2.asm
     72  paddd xmm0, xmm1            ;sum
     73  paddd xmm0, xmm2
     74  paddd xmm0, xmm3
     76  paddd xmm0, krd             ;rounding
    176  paddd xmm0, xmm6
    177  paddd xmm0, xmm2
    178  paddd xmm0, xmm3
    179  paddd xmm5, xmm1
    180  paddd xmm5, xmm7
    181  paddd xmm5, xmm4
    [all …]
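The "paddd xmm0, krd ;rounding" above is the usual add-half-then-shift step of libvpx's 7-bit subpixel filters (FILTER_BITS = 7, so krd presumably holds 1 << 6 = 64 per lane). A scalar sketch of one output pixel:

    #include <stdint.h>

    #define FILTER_BITS 7  /* libvpx subpel filters are 7-bit fixed point */

    /* One high-bitdepth 8-tap output: accumulate taps in 32 bits, add
       the rounding constant (the paddd ..., krd), shift, clamp to bd. */
    static uint16_t filter8_highbd(const uint16_t *src, const int16_t *k,
                                   int bd) {
      int32_t sum = 0;
      for (int t = 0; t < 8; ++t) sum += src[t] * k[t];
      sum = (sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS;  /* +64, >>7 */
      int32_t max = (1 << bd) - 1;                            /* e.g. 1023 */
      return (uint16_t)(sum < 0 ? 0 : (sum > max ? max : sum));
    }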
/external/boringssl/src/crypto/sha/asm/ |
sha256-586.pl
    556  &paddd  ($Wi,@MSG[0]);
    565  &paddd  ($Wi,@MSG[1]);
    574  &paddd  ($Wi,@MSG[2]);
    581  &paddd  (@MSG[0],$TMP);
    586  &paddd  ($Wi,@MSG[3]);
    593  &paddd  (@MSG[1],$TMP);
    599  &paddd  ($Wi,@MSG[0]);
    606  &paddd  (@MSG[2],$TMP);
    613  &paddd  ($Wi,@MSG[0]);
    620  &paddd  (@MSG[2],$TMP);
    [all …]
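In this SHA-extension path, paddd pre-adds the round constants K to the message words ($Wi) before each sha256rnds2. The same idiom in intrinsics, as a sketch of the pattern rather than OpenSSL's generated code:

    #include <immintrin.h>  /* SHA-NI: compile with -msha -msse4.1 */

    /* Four SHA-256 rounds: Wi = msg + K[i..i+3] (the paddd), then two
       sha256rnds2, the second fed the upper half of Wi via pshufd. */
    static void sha256_4rounds(__m128i *state0, __m128i *state1,
                               __m128i msg, const unsigned int *k4) {
      __m128i wi = _mm_add_epi32(msg, _mm_loadu_si128((const __m128i *)k4));
      *state1 = _mm_sha256rnds2_epu32(*state1, *state0, wi);
      wi = _mm_shuffle_epi32(wi, 0x0E);  /* move high 64 bits down */
      *state0 = _mm_sha256rnds2_epu32(*state0, *state1, wi);
    }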
/external/llvm/test/CodeGen/X86/ |
lower-bitcast.ll
     12  ; single paddd instruction. At the moment we produce the sequence
     18  ; CHECK-NEXT: paddd
     24  ; CHECK-WIDE: paddd
     37  ; CHECK: paddd
     42  ; CHECK-WIDE: paddd
     71  ; FIXME: At the moment we still produce the sequence pshufd+paddd+pshufd.
     72  ; Ideally, we should fold that sequence into a single paddd. This is fixed with
     77  ; CHECK-NEXT: paddd
     83  ; CHECK-WIDE-NEXT: paddd
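The test's point is that an add of values bitcast to <2 x i32> should lower to one paddd, not a pshufd+paddd+pshufd sandwich. A hypothetical C analogue of the shape being tested (function name and structure are mine, not from the test):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Treat two 64-bit values as <2 x i32> lanes and add them; the
       desired codegen is a single paddd (_mm_add_epi32) with no
       shuffles before or after. x86-64 only. */
    static uint64_t add_v2i32(uint64_t a, uint64_t b) {
      __m128i va = _mm_cvtsi64_si128((long long)a);
      __m128i vb = _mm_cvtsi64_si128((long long)b);
      return (uint64_t)_mm_cvtsi128_si64(_mm_add_epi32(va, vb));
    }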
add_shl_constant.ll
     27  ; CHECK: paddd %xmm1, %[[REG]]
     28  ; CHECK: paddd LCPI2_0(%rip), %[[REG:xmm[0-9]+]]
     41  ; CHECK: paddd %xmm1, %[[REG]]
     42  ; CHECK: paddd LCPI3_0(%rip), %[[REG:xmm[0-9]+]]
widen_load-2.ll
     10  ; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
     26  ; CHECK-NEXT: paddd %[[R0]], %[[R1]]
     41  ; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
     42  ; CHECK-NEXT: paddd 16(%{{.*}}), %[[R1]]
     59  ; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
     60  ; CHECK-NEXT: paddd 16(%{{.*}}), %[[R1]]
     61  ; CHECK-NEXT: paddd 32(%{{.*}}), %[[R2]]
     78  ; CHECK-NEXT: paddd %[[R0]], %[[R1]]
    145  ; CHECK-NEXT: paddd %[[R0]], %[[R1]]
vector-idiv.ll
     19  ; SSE41-NEXT: paddd %xmm1, %xmm0
     36  ; SSE-NEXT: paddd %xmm2, %xmm0
     71  ; SSE41-NEXT: paddd %xmm5, %xmm0
     80  ; SSE41-NEXT: paddd %xmm2, %xmm1
     97  ; SSE-NEXT: paddd %xmm3, %xmm0
    107  ; SSE-NEXT: paddd %xmm2, %xmm1
    840  ; SSE41-NEXT: paddd %xmm0, %xmm1
    844  ; SSE41-NEXT: paddd %xmm0, %xmm1
    859  ; SSE-NEXT: paddd %xmm1, %xmm2
    866  ; SSE-NEXT: paddd %xmm0, %xmm1
    [all …]
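These paddd instructions belong to the multiply-by-magic-constant lowering of integer division. For the udiv-by-7 case, the scalar recipe behind the vectorized sequence looks like this (613566757 is the magic constant that appears in the test's SSE output):

    #include <stdint.h>

    /* Scalar model of the vectorized udiv by 7: multiply-high by the
       magic constant, then the fixup add (the paddd) and two shifts.
       E.g. x = 13: q = 1, result (1 + (12 >> 1)) >> 2 = 1. */
    static uint32_t udiv7(uint32_t x) {
      uint32_t q = (uint32_t)(((uint64_t)x * 613566757u) >> 32); /* pmuludq */
      return (q + ((x - q) >> 1)) >> 2;                          /* paddd, psrld */
    }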
unaligned-spill-folding.ll
     38  ; UNALIGNED-NOT: paddd {{.*}} # 16-byte Folded Reload
     43  ; ALIGNED: paddd {{.*}} # 16-byte Folded Reload
     48  ; FORCEALIGNED: paddd {{.*}} # 16-byte Folded Reload
/external/libyuv/files/source/ |
compare.cc
    108  paddd xmm3, xmm4  // add 16 results  in HashDjb2_SSE41()
    109  paddd xmm1, xmm2                     in HashDjb2_SSE41()
    111  paddd xmm1, xmm3                     in HashDjb2_SSE41()
    114  paddd xmm1, xmm2                     in HashDjb2_SSE41()
    116  paddd xmm1, xmm2                     in HashDjb2_SSE41()
    117  paddd xmm0, xmm1                     in HashDjb2_SSE41()
    280  paddd xmm0, xmm1                     in SumSquareError_SSE2()
    281  paddd xmm0, xmm2                     in SumSquareError_SSE2()
    285  paddd xmm0, xmm1                     in SumSquareError_SSE2()
    287  paddd xmm0, xmm1                     in SumSquareError_SSE2()
/external/libvpx/libvpx/vp8/common/x86/ |
postproc_sse2.asm
    325  paddd xmm6, xmm1            ;
    327  paddd xmm7, xmm2            ;
    350  paddd xmm6, xmm2
    351  paddd xmm7, xmm4
    559  paddd xmm1, xmm1            ; -8*2 -7*2 -6*2 -5*2
    561  paddd xmm1, xmm2            ; 7+-8 8+-7 9+-6 10+-5
    564  paddd xmm6, xmm2
    565  paddd xmm7, xmm1
    576  paddd xmm6, xmm4
    577  paddd xmm7, xmm3
    [all …]
mfqe_sse2.asm
    216  paddd xmm5, xmm0
    217  paddd xmm5, xmm1
    227  paddd xmm0, [GLOBAL(t128)]
    248  paddd xmm1, xmm5
    251  paddd xmm1, xmm2
    256  paddd xmm1, [GLOBAL(t128)]
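The [GLOBAL(t128)] operand reads as a vector of 128s, i.e. the round-to-nearest bias for an 8-bit fixed-point blend (add half of 256 before shifting right by 8). A hedged sketch of that generic pattern; the weighting scheme below is an assumption for illustration, not the exact MFQE math:

    #include <stdint.h>

    /* Generic 8.8 fixed-point blend with rounding: the paddd of t128
       adds 128 before the >>8.  w in [0,256] is assumed, not taken
       from mfqe_sse2.asm. */
    static uint8_t blend_round(uint8_t cur, uint8_t ref, int w) {
      return (uint8_t)((cur * w + ref * (256 - w) + 128) >> 8);
    }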
/external/libvpx/libvpx/vp9/common/x86/ |
vp9_mfqe_sse2.asm
    217  paddd xmm5, xmm0
    218  paddd xmm5, xmm1
    228  paddd xmm0, [GLOBAL(t128)]
    249  paddd xmm1, xmm5
    252  paddd xmm1, xmm2
    257  paddd xmm1, [GLOBAL(t128)]