/external/libvpx/libvpx/third_party/libyuv/source/ |
D | compare_win.cc |
    46   paddd xmm0, xmm1  in SumSquareError_SSE2()
    47   paddd xmm0, xmm2  in SumSquareError_SSE2()
    52   paddd xmm0, xmm1  in SumSquareError_SSE2()
    54   paddd xmm0, xmm1  in SumSquareError_SSE2()
    170  paddd xmm3, xmm4  // add 16 results  in HashDjb2_SSE41()
    171  paddd xmm1, xmm2  in HashDjb2_SSE41()
    172  paddd xmm1, xmm3  in HashDjb2_SSE41()
    175  paddd xmm1, xmm2  in HashDjb2_SSE41()
    177  paddd xmm1, xmm2  in HashDjb2_SSE41()
    178  paddd xmm0, xmm1  in HashDjb2_SSE41()
    [all …]
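Note: the SumSquareError_SSE2() hits above are the accumulation step of libyuv's
sum-of-squared-differences kernel, and the HashDjb2_SSE41() hits fold per-lane hash
partials the same way; paddd corresponds to the _mm_add_epi32 intrinsic. A minimal
C sketch of the SSE pattern follows (an illustration only, not libyuv's code: the
helper name and the multiple-of-16 length assumption are mine).

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    /* Illustrative sum of squared differences over a multiple-of-16 byte span.
     * paddd shows up as _mm_add_epi32 when folding the per-lane partial sums. */
    static uint32_t sum_square_error_sse2(const uint8_t *a, const uint8_t *b,
                                          int count) {
      __m128i sum = _mm_setzero_si128();
      const __m128i zero = _mm_setzero_si128();
      for (int i = 0; i < count; i += 16) {
        __m128i va = _mm_loadu_si128((const __m128i *)(a + i));
        __m128i vb = _mm_loadu_si128((const __m128i *)(b + i));
        /* |a - b| per byte via saturating subtraction in both directions. */
        __m128i d = _mm_or_si128(_mm_subs_epu8(va, vb), _mm_subs_epu8(vb, va));
        /* Widen to 16 bits, square, and pair-sum into 32-bit lanes. */
        __m128i lo = _mm_unpacklo_epi8(d, zero);
        __m128i hi = _mm_unpackhi_epi8(d, zero);
        sum = _mm_add_epi32(sum, _mm_madd_epi16(lo, lo));  /* paddd */
        sum = _mm_add_epi32(sum, _mm_madd_epi16(hi, hi));  /* paddd */
      }
      /* Horizontal fold of the four 32-bit lanes, again with paddd. */
      sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 8));
      sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 4));
      return (uint32_t)_mm_cvtsi128_si32(sum);
    }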
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | encodeopt.asm |
    39   paddd xmm0, xmm2
    47   paddd xmm0, xmm1
    51   paddd xmm0, xmm1
    97   paddd mm1, mm5
    110  paddd mm3, mm5
    112  paddd mm1, mm3
    116  paddd mm0, mm1
    166  paddd mm2, mm5
    168  paddd mm2, mm3
    181  paddd mm2, mm5
    [all …]
|
D | fwalsh_sse2.asm |
    97   paddd xmm0, xmm4  ; b21 b20 a21 a20
    100  paddd xmm1, xmm6  ; b23 b22 a23 a22
    117  paddd xmm0, xmm4
    118  paddd xmm2, xmm5
    119  paddd xmm0, [GLOBAL(cd3)]
    120  paddd xmm2, [GLOBAL(cd3)]
    121  paddd xmm1, xmm6
    122  paddd xmm3, xmm7
    123  paddd xmm1, [GLOBAL(cd3)]
    124  paddd xmm3, [GLOBAL(cd3)]
|
D | dct_sse2.asm |
    100  paddd xmm3, XMMWORD PTR[GLOBAL(_14500)]
    101  paddd xmm4, XMMWORD PTR[GLOBAL(_7500)]
    140  paddd xmm0, xmm5
    141  paddd xmm1, xmm5
    151  paddd xmm3, XMMWORD PTR[GLOBAL(_12000)]
    152  paddd xmm4, XMMWORD PTR[GLOBAL(_51000)]
    252  paddd xmm1, XMMWORD PTR[GLOBAL(_14500)]
    253  paddd xmm4, XMMWORD PTR[GLOBAL(_14500)]
    254  paddd xmm3, XMMWORD PTR[GLOBAL(_7500)]
    255  paddd xmm5, XMMWORD PTR[GLOBAL(_7500)]
    [all …]
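Note: the _14500/_7500/_12000/_51000 operands above are rounding biases in VP8's
forward DCT; each paddd adds a broadcast bias to four 32-bit intermediates at once
before an arithmetic right shift. A hedged sketch of that round-then-shift step
(the helper name, the broadcast form, and the shift amount of 12 taken from VP8's
scalar fdct are my assumptions):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Add a rounding bias (e.g. 14500 or 7500) to four 32-bit intermediates and
     * arithmetic-shift right by 12: the add-then-shift rounding idiom behind the
     * paddd lines above. Usage: out = round_shift_12(products, 14500); */
    static __m128i round_shift_12(__m128i v, int32_t bias) {
      const __m128i k = _mm_set1_epi32(bias);
      return _mm_srai_epi32(_mm_add_epi32(v, k), 12);  /* paddd, then psrad */
    }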
|
D | dct_mmx.asm |
    100  paddd mm1, MMWORD PTR[GLOBAL(_14500)]
    101  paddd mm4, MMWORD PTR[GLOBAL(_14500)]
    102  paddd mm3, MMWORD PTR[GLOBAL(_7500)]
    103  paddd mm5, MMWORD PTR[GLOBAL(_7500)]
    185  paddd mm1, MMWORD PTR[GLOBAL(_12000)]
    186  paddd mm4, MMWORD PTR[GLOBAL(_12000)]
    187  paddd mm3, MMWORD PTR[GLOBAL(_51000)]
    188  paddd mm5, MMWORD PTR[GLOBAL(_51000)]
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | sad_sse2.asm |
    69   paddd m1, m2
    70   paddd m3, m4
    72   paddd m0, m1
    74   paddd m0, m3
    79   paddd m0, m1
    112  paddd m1, m2
    113  paddd m3, m4
    115  paddd m0, m1
    117  paddd m0, m3
    122  paddd m0, m1
    [all …]
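Note: these hits are the accumulation step of the SAD kernels: psadbw produces
absolute-difference sums whose upper dword in each 64-bit half is zero, so the
running total can be kept with paddd. An illustrative intrinsics sketch (the name
and the fixed 16-pixel width are mine; the real kernels are generated from macros
over many block sizes):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Illustrative 16-wide SAD: _mm_sad_epu8 (psadbw) sums absolute byte
     * differences into the low 16 bits of each 64-bit half, so the running
     * total can be accumulated with _mm_add_epi32 (paddd). */
    static uint32_t sad16xh_sse2(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride, int h) {
      __m128i acc = _mm_setzero_si128();
      for (int y = 0; y < h; ++y) {
        __m128i s = _mm_loadu_si128((const __m128i *)src);
        __m128i r = _mm_loadu_si128((const __m128i *)ref);
        acc = _mm_add_epi32(acc, _mm_sad_epu8(s, r));  /* paddd */
        src += src_stride;
        ref += ref_stride;
      }
      /* Fold the two halves of the accumulator. */
      acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
      return (uint32_t)_mm_cvtsi128_si32(acc);
    }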
|
D | highbd_variance_impl_sse2.asm |
    91   paddd xmm6, xmm1
    98   paddd xmm6, xmm3
    105  paddd xmm6, xmm1
    110  paddd xmm6, xmm3
    121  paddd xmm7, xmm5
    122  paddd xmm7, xmm2
    135  paddd xmm6, xmm4
    139  paddd xmm7, xmm5
    147  paddd xmm6, xmm4
    148  paddd xmm7, xmm5
    [all …]
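Note: the variance kernels keep two paddd chains, one accumulating the sum of
differences and one the sum of squared differences; the caller then derives
variance as sse - sum*sum/N. A hedged sketch of that two-accumulator pattern for
high-bit-depth (16-bit) samples, with my own naming and without the overflow
handling the real code needs for tall blocks:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Differences of 10/12-bit samples fit in signed 16 bits, so pmaddwd(d, 1)
     * pair-sums the differences and pmaddwd(d, d) pair-sums the squares into
     * 32-bit lanes; paddd keeps the two running accumulators. */
    static void highbd_var8xh_sse2(const uint16_t *src, int src_stride,
                                   const uint16_t *ref, int ref_stride, int h,
                                   int64_t *sum, uint64_t *sse) {
      const __m128i one = _mm_set1_epi16(1);
      __m128i vsum = _mm_setzero_si128();  /* sum of differences   */
      __m128i vsse = _mm_setzero_si128();  /* sum of squared diffs */
      for (int y = 0; y < h; ++y) {
        __m128i s = _mm_loadu_si128((const __m128i *)src);
        __m128i r = _mm_loadu_si128((const __m128i *)ref);
        __m128i d = _mm_sub_epi16(s, r);
        vsum = _mm_add_epi32(vsum, _mm_madd_epi16(d, one));  /* paddd */
        vsse = _mm_add_epi32(vsse, _mm_madd_epi16(d, d));    /* paddd */
        src += src_stride;
        ref += ref_stride;
      }
      int32_t sums[4], sses[4];
      _mm_storeu_si128((__m128i *)sums, vsum);
      _mm_storeu_si128((__m128i *)sses, vsse);
      *sum = (int64_t)sums[0] + sums[1] + sums[2] + sums[3];
      *sse = (uint64_t)(uint32_t)sses[0] + (uint32_t)sses[1] +
             (uint32_t)sses[2] + (uint32_t)sses[3];
    }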
|
D | halfpix_variance_impl_sse2.asm |
    71   …paddd xmm7, xmm5  ; xmm7 += accumulated square column differenc…
    72   paddd xmm7, xmm4
    89   paddd xmm0, xmm1
    95   paddd xmm6, xmm7
    99   paddd xmm0, xmm1
    107  paddd xmm6, xmm7
    108  paddd xmm0, xmm1
    176  …paddd xmm7, xmm5  ; xmm7 += accumulated square column differenc…
    177  paddd xmm7, xmm4
    194  paddd xmm0, xmm1
    [all …]
|
D | sad4d_sse2.asm |
    50   paddd m6, m1
    51   paddd m7, m3
    90   paddd m4, m1
    93   paddd m5, m2
    94   paddd m6, m3
    96   paddd m7, m1
    127  paddd m4, m1
    129  paddd m5, m2
    130  paddd m6, m3
    132  paddd m7, m1
    [all …]
|
D | highbd_sad4d_sse2.asm |
    57   paddd m4, m2
    66   paddd m5, m2
    75   paddd m6, m2
    84   paddd m7, m2
    132  paddd m4, m2
    139  paddd m5, m2
    146  paddd m6, m2
    152  paddd m7, m2
    164  paddd m4, m2
    171  paddd m5, m2
    [all …]
|
D | highbd_sad_sse2.asm |
    98   paddd m0, m1
    99   paddd m0, m3
    137  paddd m0, m1
    139  paddd m0, m3
    145  paddd m0, m1
    148  paddd m0, m1
    205  paddd m0, m1
    207  paddd m0, m3
    212  paddd m0, m1
    215  paddd m0, m1
    [all …]
|
D | variance_impl_mmx.asm |
    42   paddd mm4, mm0
    43   paddd mm4, mm1
    44   paddd mm4, mm2
    45   paddd mm4, mm3
    116  paddd mm7, mm0  ; accumulate in mm7
    117  paddd mm7, mm2  ; accumulate in mm7
    139  paddd mm7, mm0  ; accumulate in mm7
    140  paddd mm7, mm2  ; accumulate in mm7
    162  paddd mm7, mm0  ; accumulate in mm7
    163  paddd mm7, mm2  ; accumulate in mm7
    [all …]
|
/external/libyuv/files/source/ |
D | compare_win.cc |
    47   paddd xmm0, xmm1  in SumSquareError_SSE2()
    48   paddd xmm0, xmm2  in SumSquareError_SSE2()
    53   paddd xmm0, xmm1  in SumSquareError_SSE2()
    55   paddd xmm0, xmm1  in SumSquareError_SSE2()
    162  paddd xmm3, xmm4  // add 16 results  in HashDjb2_SSE41()
    163  paddd xmm1, xmm2  in HashDjb2_SSE41()
    164  paddd xmm1, xmm3  in HashDjb2_SSE41()
    167  paddd xmm1, xmm2  in HashDjb2_SSE41()
    169  paddd xmm1, xmm2  in HashDjb2_SSE41()
    170  paddd xmm0, xmm1  in HashDjb2_SSE41()
|
/external/libjpeg-turbo/simd/ |
D | jccolext-mmx.asm |
    306  paddd mm7,mm1
    307  paddd mm4,mm6
    308  paddd mm7,mm5
    309  paddd mm4,mm5
    338  paddd mm5,mm0
    339  paddd mm4,mm6
    340  paddd mm5,mm1
    341  paddd mm4,mm1
    366  paddd mm0, MMWORD [wk(4)]
    367  paddd mm4, MMWORD [wk(5)]
    [all …]
|
D | jccolext-sse2-64.asm |
    320  paddd xmm7,xmm1
    321  paddd xmm4,xmm6
    322  paddd xmm7,xmm5
    323  paddd xmm4,xmm5
    352  paddd xmm5,xmm0
    353  paddd xmm4,xmm6
    354  paddd xmm5,xmm1
    355  paddd xmm4,xmm1
    380  paddd xmm0, XMMWORD [wk(4)]
    381  paddd xmm4, XMMWORD [wk(5)]
    [all …]
|
D | jccolext-sse2.asm |
    334  paddd xmm7,xmm1
    335  paddd xmm4,xmm6
    336  paddd xmm7,xmm5
    337  paddd xmm4,xmm5
    366  paddd xmm5,xmm0
    367  paddd xmm4,xmm6
    368  paddd xmm5,xmm1
    369  paddd xmm4,xmm1
    394  paddd xmm0, XMMWORD [wk(4)]
    395  paddd xmm4, XMMWORD [wk(5)]
    [all …]
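Note: the three jccolext variants above are libjpeg-turbo's RGB-to-YCbCr
conversion; the paddd lines combine pmaddwd partial products (some parked in the
wk() scratch slots) and a rounding term before the final shift. A rough sketch of
that fixed-point pattern for the luma channel only (the coefficients are BT.601
weights rescaled to 2^15 so they fit signed 16-bit pmaddwd operands; the real code
scales differently and produces all three channels at once):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Y = (cR*R + cG*G + cB*B + round) >> 15 for eight pixels, SSE2 only.
     * The _mm_add_epi32 calls are the paddd steps: partial products plus the
     * rounding bias are summed in 32-bit lanes before shifting and packing. */
    static void rgb_to_y_sse2(const int16_t r[8], const int16_t g[8],
                              const int16_t b[8], int16_t y[8]) {
      const __m128i c_rg = _mm_set_epi16(19235, 9798, 19235, 9798,
                                         19235, 9798, 19235, 9798); /* (R,G) */
      const __m128i c_b  = _mm_set1_epi32(3735);    /* 16-bit pairs (3735, 0) */
      const __m128i half = _mm_set1_epi32(1 << 14); /* rounding bias */
      const __m128i zero = _mm_setzero_si128();

      __m128i vr = _mm_loadu_si128((const __m128i *)r);
      __m128i vg = _mm_loadu_si128((const __m128i *)g);
      __m128i vb = _mm_loadu_si128((const __m128i *)b);

      /* Low four pixels: interleave R/G so pmaddwd yields cR*R + cG*G per lane. */
      __m128i y_lo = _mm_madd_epi16(_mm_unpacklo_epi16(vr, vg), c_rg);
      y_lo = _mm_add_epi32(y_lo,
                           _mm_madd_epi16(_mm_unpacklo_epi16(vb, zero), c_b));
      y_lo = _mm_add_epi32(y_lo, half);  /* paddd */
      y_lo = _mm_srai_epi32(y_lo, 15);

      /* High four pixels, same steps. */
      __m128i y_hi = _mm_madd_epi16(_mm_unpackhi_epi16(vr, vg), c_rg);
      y_hi = _mm_add_epi32(y_hi,
                           _mm_madd_epi16(_mm_unpackhi_epi16(vb, zero), c_b));
      y_hi = _mm_add_epi32(y_hi, half);
      y_hi = _mm_srai_epi32(y_hi, 15);

      _mm_storeu_si128((__m128i *)y, _mm_packs_epi32(y_lo, y_hi));
    }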
|
/external/boringssl/src/crypto/sha/asm/ |
D | sha256-586.pl |
    550  &paddd ($Wi,@MSG[0]);
    559  &paddd ($Wi,@MSG[1]);
    568  &paddd ($Wi,@MSG[2]);
    575  &paddd (@MSG[0],$TMP);
    580  &paddd ($Wi,@MSG[3]);
    587  &paddd (@MSG[1],$TMP);
    593  &paddd ($Wi,@MSG[0]);
    600  &paddd (@MSG[2],$TMP);
    607  &paddd ($Wi,@MSG[0]);
    614  &paddd (@MSG[2],$TMP);
    [all …]
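Note: these hits appear to be in the SHA extension (shaext) code path of
sha256-586.pl, where paddd adds four message-schedule words to four round
constants (and, via $TMP, completes the schedule update) before they are fed to
the sha256rnds2 rounds. A minimal sketch of just the Wi = MSG + K step (the helper
name is mine; state handling and the sha256msg1/sha256msg2 schedule steps are
omitted):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Add four consecutive SHA-256 round constants to four message-schedule
     * words in one paddd, producing the W+K input for the next four rounds. */
    static __m128i add_round_constants(__m128i msg, const uint32_t k[4]) {
      const __m128i wk = _mm_loadu_si128((const __m128i *)k);
      return _mm_add_epi32(msg, wk);  /* paddd */
    }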
|
/external/fec/ |
D | dotprod_sse2_assist.s |
    37  paddd %xmm1,%xmm0
    41  paddd %xmm1,%xmm0
    45  paddd %xmm1,%xmm0
    51  paddd %xmm1,%xmm0
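Note: the fec dot-product helpers fold partial sums into one accumulator with
paddd; the partials themselves most likely come from pmaddwd over 16-bit samples
and coefficients. A hedged C sketch of that kernel (names are mine; the real
assembly also deals with alignment and odd-length tails):

    #include <emmintrin.h>
    #include <stdint.h>

    /* 16-bit dot product: pmaddwd multiplies eight sample/coefficient pairs and
     * pair-sums them into four 32-bit lanes; paddd (_mm_add_epi32) accumulates
     * the lanes and folds them at the end. len must be a multiple of 8 here. */
    static int32_t dotprod16_sse2(const int16_t *a, const int16_t *b, int len) {
      __m128i acc = _mm_setzero_si128();
      for (int i = 0; i < len; i += 8) {
        __m128i va = _mm_loadu_si128((const __m128i *)(a + i));
        __m128i vb = _mm_loadu_si128((const __m128i *)(b + i));
        acc = _mm_add_epi32(acc, _mm_madd_epi16(va, vb));  /* paddd */
      }
      acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));    /* paddd */
      acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));    /* paddd */
      return _mm_cvtsi128_si32(acc);
    }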
|
D | dotprod_mmx_assist.s |
    37  paddd %mm1,%mm0
    41  paddd %mm1,%mm0
    45  paddd %mm1,%mm0
    51  paddd %mm1,%mm0
|
/external/llvm/test/CodeGen/X86/ |
D | lower-bitcast.ll |
    12  ; single paddd instruction. At the moment we produce the sequence
    18  ; CHECK-NEXT: paddd
    24  ; CHECK-WIDE: paddd
    37  ; CHECK: paddd
    42  ; CHECK-WIDE: paddd
    71  ; FIXME: At the moment we still produce the sequence pshufd+paddd+pshufd.
    72  ; Ideally, we should fold that sequence into a single paddd. This is fixed with
    77  ; CHECK-NEXT: paddd
    83  ; CHECK-WIDE-NEXT: paddd
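Note: the matched comments state the intent of this test: an add over a
<2 x i32> value produced by a bitcast should lower to a single paddd rather than
a pshufd+paddd+pshufd sequence. A rough C paraphrase of the pattern being
exercised (not the test's IR):

    #include <stdint.h>

    /* Two independent 32-bit additions packed in one 64-bit value; a backend
     * can perform this as a single paddd on the low half of an xmm register
     * instead of shuffling the lanes apart and back together. */
    static uint64_t add_v2i32(uint64_t a, uint64_t b) {
      uint32_t lo = (uint32_t)a + (uint32_t)b;
      uint32_t hi = (uint32_t)(a >> 32) + (uint32_t)(b >> 32);
      return ((uint64_t)hi << 32) | lo;
    }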
|
D | combine-multiplies.ll |
    75   ; paddd %xmm0, %xmm1
    84   ; paddd %xmm0, %xmm2
    85   ; paddd .LCPI1_3, %xmm0
    98   ; CHECK-NEXT: paddd %xmm0, [[C11]]
    107  ; CHECK-NEXT: paddd [[T6]], [[C242]]
    108  ; CHECK-NEXT: paddd .LCPI1_3, [[C726:%xmm[0-9]]]
    136  ; CHECK-NEXT: paddd %xmm0, [[C11]]
    146  ; CHECK-NEXT: paddd [[T6]], [[C242]]
    147  ; CHECK-NEXT: paddd .LCPI2_3, [[C726:%xmm[0-9]]]
|
D | add_shl_constant.ll |
    27  ; CHECK: paddd %xmm1, %[[REG]]
    28  ; CHECK: paddd LCPI2_0(%rip), %[[REG:xmm[0-9]+]]
    41  ; CHECK: paddd %xmm1, %[[REG]]
    42  ; CHECK: paddd LCPI3_0(%rip), %[[REG:xmm[0-9]+]]
|
D | widen_load-2.ll |
    10   ; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
    26   ; CHECK-NEXT: paddd %[[R0]], %[[R1]]
    41   ; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
    42   ; CHECK-NEXT: paddd 16(%{{.*}}), %[[R1]]
    59   ; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
    60   ; CHECK-NEXT: paddd 16(%{{.*}}), %[[R1]]
    61   ; CHECK-NEXT: paddd 32(%{{.*}}), %[[R2]]
    78   ; CHECK-NEXT: paddd %[[R0]], %[[R1]]
    145  ; CHECK-NEXT: paddd %[[R0]], %[[R1]]
|
/external/libvpx/libvpx/vp8/common/x86/ |
D | postproc_sse2.asm |
    325  paddd xmm6, xmm1  ;
    327  paddd xmm7, xmm2  ;
    350  paddd xmm6, xmm2
    351  paddd xmm7, xmm4
    559  paddd xmm1, xmm1  ; -8*2 -7*2 -6*2 -5*2
    561  paddd xmm1, xmm2  ; 7+-8 8+-7 9+-6 10+-5
    564  paddd xmm6, xmm2
    565  paddd xmm7, xmm1
    576  paddd xmm6, xmm4
    577  paddd xmm7, xmm3
    [all …]
|
/external/libyuv/files/util/ |
D | psnr.cc |
    138  paddd xmm0, xmm1  in SumSquareError_SSE2()
    139  paddd xmm0, xmm2  in SumSquareError_SSE2()
    144  paddd xmm0, xmm1  in SumSquareError_SSE2()
    146  paddd xmm0, xmm1  in SumSquareError_SSE2()
|