/external/libvpx/libvpx/vpx_dsp/x86/

highbd_intrapred_sse2.asm
     27: pshuflw m1, m0, 0xe
     29: pshuflw m1, m0, 0x1
     33: pshuflw m0, m0, 0x0
     63: pshuflw m0, m0, 0x0
    103: pshuflw m0, m0, 0x0
    154: pshuflw m0, m0, 0x0
    262: pshuflw m1, m1, 0x0
    274: pshuflw m2, m1, 0x0
    275: pshuflw m5, m1, 0x55
    285: pshuflw m2, m1, 0xaa
    [all …]

intrapred_sse2.asm
    147: pshuflw m0, m0, 0x0
    168: pshuflw m0, m0, 0x0
    188: pshuflw m0, m0, 0x0
    214: pshuflw m0, m0, 0x0
    240: pshuflw m0, m0, 0x0
    267: pshuflw m0, m0, 0x0
    331: pshuflw m0, m0, 0x0
    361: pshuflw m0, m0, 0x0
    390: pshuflw m0, m0, 0x0
    448: pshuflw m0, m0, 0x0
    [all …]

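In the intrapred files the immediate is almost always 0x0, the SSE2 splat idiom: pshuflw copies word 0 into all four low words, and a following pshufd (or punpcklqdq) replicates that across the register so one 16-bit value fills every lane. A minimal C sketch of the same idiom, using the intrinsic that compiles to pshuflw (the function name is mine, not libvpx's):

    #include <emmintrin.h>

    /* Broadcast the low 16-bit word of v to all eight word lanes,
     * the way the DC/H predictors above do it on SSE2. */
    static __m128i splat_low_word(__m128i v) {
      v = _mm_shufflelo_epi16(v, 0x00);  /* pshuflw 0x0: low words = w0 */
      return _mm_shuffle_epi32(v, 0x00); /* pshufd 0x0: copy dword 0    */
    }
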
vpx_high_subpixel_8t_sse2.asm
     22: pshuflw xmm0, xmm7, 0b ;k0
     23: pshuflw xmm1, xmm7, 01010101b ;k1
     24: pshuflw xmm2, xmm7, 10101010b ;k2
     25: pshuflw xmm3, xmm7, 11111111b ;k3
     27: pshuflw xmm4, xmm7, 0b ;k4
     28: pshuflw xmm5, xmm7, 01010101b ;k5
     29: pshuflw xmm6, xmm7, 10101010b ;k6
     30: pshuflw xmm7, xmm7, 11111111b ;k7
     98: pshuflw xmm0, xmm7, 0b ;k0
     99: pshuflw xmm1, xmm7, 01010101b ;k1
    [all …]

vpx_subpixel_8t_sse2.asm
     22: pshuflw xmm0, xmm7, 0b ;k0
     23: pshuflw xmm1, xmm7, 01010101b ;k1
     24: pshuflw xmm2, xmm7, 10101010b ;k2
     25: pshuflw xmm3, xmm7, 11111111b ;k3
     27: pshuflw xmm4, xmm7, 0b ;k4
     28: pshuflw xmm5, xmm7, 01010101b ;k5
     29: pshuflw xmm6, xmm7, 10101010b ;k6
     30: pshuflw xmm7, xmm7, 11111111b ;k7
     95: pshuflw xmm0, xmm7, 0b ;k0
     96: pshuflw xmm1, xmm7, 01010101b ;k1
    [all …]

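In the subpixel filters the immediates cycle through 0b, 01010101b, 10101010b, 11111111b (0x00/0x55/0xaa/0xff), splatting each of the four low 16-bit filter taps in turn; the k4..k7 group repeats the sequence after the high qword of xmm7 has been moved down. An intrinsics sketch of the k0..k3 step (names are mine, not the libvpx/libaom ones):

    #include <emmintrin.h>

    /* Broadcast each of the four low 16-bit taps of `coeffs` across
     * the low qword of its own register, as in k0..k3 above. */
    static void broadcast_taps(__m128i coeffs, __m128i *k0, __m128i *k1,
                               __m128i *k2, __m128i *k3) {
      *k0 = _mm_shufflelo_epi16(coeffs, 0x00); /* 0b        ;k0 */
      *k1 = _mm_shufflelo_epi16(coeffs, 0x55); /* 01010101b ;k1 */
      *k2 = _mm_shufflelo_epi16(coeffs, 0xaa); /* 10101010b ;k2 */
      *k3 = _mm_shufflelo_epi16(coeffs, 0xff); /* 11111111b ;k3 */
    }
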
/external/libaom/libaom/aom_dsp/x86/

intrapred_asm_sse2.asm
     41: pshuflw m0, m0, 0x0
     62: pshuflw m0, m0, 0x0
     82: pshuflw m0, m0, 0x0
    108: pshuflw m0, m0, 0x0
    134: pshuflw m0, m0, 0x0
    161: pshuflw m0, m0, 0x0
    225: pshuflw m0, m0, 0x0
    255: pshuflw m0, m0, 0x0
    284: pshuflw m0, m0, 0x0
    342: pshuflw m0, m0, 0x0
    [all …]

aom_subpixel_8t_sse2.asm
     25: pshuflw xmm0, xmm7, 0b ;k0
     26: pshuflw xmm1, xmm7, 01010101b ;k1
     27: pshuflw xmm2, xmm7, 10101010b ;k2
     28: pshuflw xmm3, xmm7, 11111111b ;k3
     30: pshuflw xmm4, xmm7, 0b ;k4
     31: pshuflw xmm5, xmm7, 01010101b ;k5
     32: pshuflw xmm6, xmm7, 10101010b ;k6
     33: pshuflw xmm7, xmm7, 11111111b ;k7
     98: pshuflw xmm0, xmm7, 0b ;k0
     99: pshuflw xmm1, xmm7, 01010101b ;k1
    [all …]

aom_high_subpixel_8t_sse2.asm
     25: pshuflw xmm0, xmm7, 0b ;k0
     26: pshuflw xmm1, xmm7, 01010101b ;k1
     27: pshuflw xmm2, xmm7, 10101010b ;k2
     28: pshuflw xmm3, xmm7, 11111111b ;k3
     30: pshuflw xmm4, xmm7, 0b ;k4
     31: pshuflw xmm5, xmm7, 01010101b ;k5
     32: pshuflw xmm6, xmm7, 10101010b ;k6
     33: pshuflw xmm7, xmm7, 11111111b ;k7
    101: pshuflw xmm0, xmm7, 0b ;k0
    102: pshuflw xmm1, xmm7, 01010101b ;k1
    [all …]

highbd_intrapred_asm_sse2.asm
     30: pshuflw m1, m0, 0xe
     32: pshuflw m1, m0, 0x1
     36: pshuflw m0, m0, 0x0
     66: pshuflw m0, m0, 0x0
    106: pshuflw m0, m0, 0x0
    157: pshuflw m0, m0, 0x0

/external/llvm/test/CodeGen/X86/

bswap-vector.ll
     20: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
     23: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
     53: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
     56: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
     87: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
     91: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    125: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
    128: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    133: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
    136: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
    [all …]

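These CHECK-NOSSSE3 lines are bswap expanded without pshufb: LLVM unpacks the bytes to words against zero, permutes the words ([1,0,3,2] swaps adjacent words, [3,2,1,0] reverses a group of four), and packs back down. A standalone SSE2 sketch of the same job for v4i32, doing the in-word byte swap with shifts and the word swap with the shuffles matched above:

    #include <emmintrin.h>

    /* Byte-swap each 32-bit lane on plain SSE2 (function name is
     * mine; LLVM's exact expansion differs but the word-swap role
     * of the pshuflw/pshufhw masks is the same). */
    static __m128i bswap_epi32_sse2(__m128i v) {
      /* exchange the two bytes inside every 16-bit word */
      v = _mm_or_si128(_mm_srli_epi16(v, 8), _mm_slli_epi16(v, 8));
      /* exchange the two words inside every dword: [1,0,3,2] */
      v = _mm_shufflelo_epi16(v, _MM_SHUFFLE(2, 3, 0, 1));
      return _mm_shufflehi_epi16(v, _MM_SHUFFLE(2, 3, 0, 1));
    }
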
vector-shuffle-128-v8.ll
     66: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
     86: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
    127: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
    143: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
    180: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
    211: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
    237: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    262: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    287: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    312: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
    [all …]

swizzle-2.ll
    303: ; legal shuffle (either pshuflw or pshufb depending on the masks).
    308: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,0,4,5,6,7]
    318: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,0,4,5,6,7]
    338: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,0,4,5,6,7]
    349: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,0,2,4,5,6,7]
    360: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
    371: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
    392: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
    403: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,3,1,0,4,5,6,7]
    414: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
    [all …]

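The comment matched at line 303 states the rule these tests pin down: any permutation that touches only the low four words, leaving words 4..7 in place, is a single legal pshuflw. For example the [1,3,2,0,4,5,6,7] pattern above is, in intrinsics (note that _MM_SHUFFLE lists the indices high-to-low):

    #include <emmintrin.h>

    /* One pshuflw covers any permutation of the low four words. */
    static __m128i perm_1320(__m128i v) {
      return _mm_shufflelo_epi16(v, _MM_SHUFFLE(0, 2, 3, 1)); /* [1,3,2,0,4,5,6,7] */
    }
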
pr2585.ll
     11: ; X32-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
     19: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]

sse3.ll
     47: ; X64-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,1,4,5,6,7]
     61: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
    102: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
    112: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[2,1,0,3,4,5,6,7]
    176: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
    209: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
    222: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
    247: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]

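The mem[...] forms show the shuffle reading straight from memory: pshuflw accepts an m128 source, so when the loaded value has no other use the load is folded into the shuffle. In intrinsics terms (the fold happens during instruction selection; with an SSE memory operand the pointer must be 16-byte aligned):

    #include <emmintrin.h>

    /* Typically compiles to a single pshuflw with a memory operand
     * when the load is only used here (sketch; name is mine). */
    static __m128i shuf_0223_from_mem(const __m128i *p) {
      return _mm_shufflelo_epi16(_mm_load_si128(p), _MM_SHUFFLE(3, 2, 2, 0));
    }
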
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

bswap-vector.ll
     17: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
     20: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
     50: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
     53: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
     84: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
     88: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    122: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
    125: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    130: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
    133: ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
    [all …]

sse3.ll
     69: ; X86-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,1,4,5,6,7]
     78: ; X64-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,1,4,5,6,7]
     92: ; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
    101: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
    161: ; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
    167: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
    179: ; X86-NEXT: pshuflw {{.*#+}} xmm0 = mem[2,1,0,3,4,5,6,7]
    186: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[2,1,0,3,4,5,6,7]
    258: ; X86-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
    266: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
    [all …]

combine-udiv.ll
    188: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
    192: ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
    196: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
    199: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
    235: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
    239: ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
    243: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
    246: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
    284: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
    288: ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
    [all …]

combine-sra.ll
    153: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
    157: ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
    161: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
    164: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
    276: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
    280: ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
    284: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
    287: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]

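The [2,3,3,3]/[0,1,1,1] pairs in combine-udiv.ll and combine-sra.ll (and in funnel-shift-rot.ll further down) are per-lane variable shifts: SSE2's psrld/psrad take one scalar count from the low qword of a register, so each lane's 32-bit count is splatted to the bottom, the whole vector is shifted once per count, and the results are blended back together lane by lane. A sketch, assuming all counts are below 32 so the splatted upper words are zero (name is mine):

    #include <emmintrin.h>

    /* v >> counts, lane by lane, on plain SSE2. */
    static __m128i srl_var_epi32_sse2(__m128i v, __m128i counts) {
      __m128i chi = _mm_shuffle_epi32(counts, _MM_SHUFFLE(3, 2, 3, 2));
      __m128i c0 = _mm_shufflelo_epi16(counts, _MM_SHUFFLE(1, 1, 1, 0)); /* [0,1,1,1] */
      __m128i c1 = _mm_shufflelo_epi16(counts, _MM_SHUFFLE(3, 3, 3, 2)); /* [2,3,3,3] */
      __m128i c2 = _mm_shufflelo_epi16(chi,    _MM_SHUFFLE(1, 1, 1, 0));
      __m128i c3 = _mm_shufflelo_epi16(chi,    _MM_SHUFFLE(3, 3, 3, 2));
      __m128i r0 = _mm_srl_epi32(v, c0), r1 = _mm_srl_epi32(v, c1);
      __m128i r2 = _mm_srl_epi32(v, c2), r3 = _mm_srl_epi32(v, c3);
      /* keep lane i of r_i: [r0.0, r1.1, r2.2, r3.3] */
      __m128 lo = _mm_castsi128_ps(_mm_unpacklo_epi32(r0, r1)); /* [r0.0,r1.0,r0.1,r1.1] */
      __m128 hi = _mm_castsi128_ps(_mm_unpackhi_epi32(r2, r3)); /* [r2.2,r3.2,r2.3,r3.3] */
      return _mm_castps_si128(_mm_shuffle_ps(lo, hi, _MM_SHUFFLE(3, 0, 3, 0)));
    }
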
swizzle-2.ll
    303: ; legal shuffle (either pshuflw or pshufb depending on the masks).
    308: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,0,4,5,6,7]
    318: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,0,4,5,6,7]
    338: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,0,4,5,6,7]
    349: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,0,2,4,5,6,7]
    360: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
    371: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
    392: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
    403: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,3,1,0,4,5,6,7]
    414: ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
    [all …]

vector-shuffle-128-v8.ll
     66: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
     86: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
    149: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
    187: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
    224: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
    299: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
    325: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    350: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    375: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    400: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
    [all …]

pr2585.ll
     11: ; X32-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
     19: ; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]

bitcast-and-setcc-512.ll
     17: ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
     20: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
     24: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
     27: ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
     32: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
     35: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
     39: ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
     42: ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
    142: ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,1,0,2,4,5,6,7]
    145: ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[0,1,0,2,4,5,6,7]
    [all …]

shuffle-strided-with-offset-128.ll
     19: ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
     22: ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
     24: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
     27: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
     82: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = mem[3,1,2,3,4,5,6,7]
     85: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    184: ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
    187: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
    244: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
    247: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    [all …]

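These masks come from stride-2 extraction of 16-bit elements. A related sketch using the [0,2,2,3] mask seen in sse3.ll and bitcast-and-setcc-512.ll above, gathering the even-indexed words into the low qword:

    #include <emmintrin.h>

    /* Pack words 0,2,4,6 of v into the low qword; the upper qword is
     * don't-care (sketch of the pshuflw/pshufhw/pshufd pattern). */
    static __m128i even_words_down(__m128i v) {
      v = _mm_shufflelo_epi16(v, _MM_SHUFFLE(3, 2, 2, 0)); /* low  -> w0,w2 */
      v = _mm_shufflehi_epi16(v, _MM_SHUFFLE(3, 2, 2, 0)); /* high -> w4,w6 */
      return _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 2, 0)); /* join dwords 0,2 */
    }
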
funnel-shift-rot.ll
     99: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
    102: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm3[0,1,1,1,4,5,6,7]
    107: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[2,3,3,3,4,5,6,7]
    110: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,1,4,5,6,7]
    277: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[2,3,3,3,4,5,6,7]
    280: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm4[0,1,1,1,4,5,6,7]
    285: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm4[2,3,3,3,4,5,6,7]
    288: ; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,1,4,5,6,7]

vector-bitreverse.ll
    306: ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
    309: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    383: ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
    386: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    461: ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
    465: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
    690: ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
    693: ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
    730: ; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,2,4,5,6,7]
    733: ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
    [all …]

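In bitreverse expansion these shuffles are just the embedded bswap stage; the bits inside each byte are then reversed with three mask-shift-or rounds. A sketch of that second stage on SSE2 (16-bit shifts are safe here because the masks confine every bit to its own byte; name is mine):

    #include <emmintrin.h>

    /* Reverse the bit order inside every byte of v. */
    static __m128i bitrev_bytes_sse2(__m128i v) {
      const __m128i m4 = _mm_set1_epi8(0x0f);
      const __m128i m2 = _mm_set1_epi8(0x33);
      const __m128i m1 = _mm_set1_epi8(0x55);
      v = _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v, m4), 4),
                       _mm_and_si128(_mm_srli_epi16(v, 4), m4)); /* nibbles   */
      v = _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v, m2), 2),
                       _mm_and_si128(_mm_srli_epi16(v, 2), m2)); /* bit pairs */
      v = _mm_or_si128(_mm_slli_epi16(_mm_and_si128(v, m1), 1),
                       _mm_and_si128(_mm_srli_epi16(v, 1), m1)); /* bits      */
      return v;
    }
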
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/

sse3.ll
     20: ; X64: pshuflw $0, %xmm0, %xmm0
     55: ; X64: pshuflw $44, %xmm0, %xmm0
     94: ; X64: pshuflw $-80, %xmm0, %xmm0
    122: ; X64: pshuflw $-58, (%rsi), %xmm0
    172: ; X64: pshuflw $8, [[X1]], [[X2:%xmm[0-9]+]]
    188: ; X64: pshuflw $1, %xmm0, %xmm0
    240: ; X64: pshuflw $-128, %xmm0, %xmm0

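Unlike the FileCheck output above, these older tests match the raw immediate, which packs four 2-bit source indices starting from the low bits. Decoding $-58 (= 0xc6), for example:

    0xc6 = 11 00 01 10b
           bits 1:0 = 2  ->  dst word 0 = src word 2
           bits 3:2 = 1  ->  dst word 1 = src word 1
           bits 5:4 = 0  ->  dst word 2 = src word 0
           bits 7:6 = 3  ->  dst word 3 = src word 3

That is the mem[2,1,0,3,4,5,6,7] form the newer sse3.ll tests print for the same shuffle; likewise $-80 (= 0xb0) decodes to xmm0[0,0,3,2,4,5,6,7].
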