/external/llvm/test/CodeGen/X86/
D | vector-idiv.ll |
    64 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    65 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    68 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
    77 ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    78 ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    81 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
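Note: the repeated [1,1,3,3] shuffles in these divide-by-constant tests exist because pmuludq/pmuldq only multiply the even dword lanes (0 and 2), so vpshufd must route the odd lanes through a second multiply. A minimal C sketch of the same multiply-high trick, assuming the usual divide-by-7 magic constant 0x24924925 from Hacker's Delight (function names are illustrative, not LLVM's):

    #include <immintrin.h>

    /* High 32 bits of each unsigned 32x32 product: pmuludq covers dword
       lanes 0 and 2, vpshufd [1,1,3,3] feeds lanes 1 and 3 through a
       second pmuludq, then a word blend recombines the halves (SSE4.1). */
    static __m128i mulhi_epu32(__m128i x, __m128i m) {
        __m128i even = _mm_srli_epi64(_mm_mul_epu32(x, m), 32);
        __m128i xodd = _mm_shuffle_epi32(x, _MM_SHUFFLE(3, 3, 1, 1)); /* x[1,1,3,3] */
        __m128i modd = _mm_shuffle_epi32(m, _MM_SHUFFLE(3, 3, 1, 1)); /* m[1,1,3,3] */
        __m128i odd  = _mm_mul_epu32(xodd, modd); /* high halves land in lanes 1,3 */
        return _mm_blend_epi16(even, odd, 0xCC);  /* take lanes 1,3 from odd */
    }

    /* Divide four u32 lanes by 7: multiply-high, add-back, shift. */
    static __m128i udiv7_epu32(__m128i x) {
        __m128i q = mulhi_epu32(x, _mm_set1_epi32(0x24924925));
        __m128i t = _mm_srli_epi32(_mm_sub_epi32(x, q), 1);
        return _mm_srli_epi32(_mm_add_epi32(t, q), 2);
    }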
|
D | vector-idiv-sdiv-256.ll |
    92 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
    94 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
    97 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
    103 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
    104 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
    107 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    119 ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
    120 ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
    123 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
    169 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
    [all …]
|
D | vector-trunc.ll |
    43 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
    44 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    47 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
    48 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    55 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
    57 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
    139 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
    141 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
    193 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
    195 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
    [all …]
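Note: in these trunc tests vpshufd gathers the even dwords of each 128-bit half ([0,2,2,3] on one half, [0,1,0,2] on the other) so the narrowed i32 values can be glued back together; the AVX2 lines do the same per-lane with [0,2,0,2,4,6,4,6]. A hedged sketch of the idea using a symmetric shuffle pair (not the exact sequence LLVM picks):

    #include <immintrin.h>

    /* Truncate four i64 lanes, split across two xmm registers as on AVX1,
       to four i32 lanes: pack each half's even dwords into the low
       quadword, then join the two packed quadwords. */
    static __m128i trunc_v4i64_to_v4i32(__m128i lo, __m128i hi) {
        __m128i l = _mm_shuffle_epi32(lo, _MM_SHUFFLE(3, 2, 2, 0)); /* lo[0,2,2,3] */
        __m128i h = _mm_shuffle_epi32(hi, _MM_SHUFFLE(3, 2, 2, 0)); /* hi[0,2,2,3] */
        return _mm_unpacklo_epi64(l, h);
    }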
|
D | vector-idiv-udiv-256.ll |
    99 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    100 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    103 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    110 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
    112 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
    115 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    127 ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
    128 ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
    131 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
    180 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
    [all …]
|
D | swizzle-avx2.ll |
    30 ; CHECK: vpshufd $78
    32 ; CHECK-NOT: vpshufd
    42 ; CHECK: vpshufd $78
    44 ; CHECK-NOT: vpshufd
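Note: $78 is 0x4E, selector [2,3,0,1], i.e. a swap of the two 64-bit halves; the CHECK-NOT lines verify that the shuffle combiner removed a second, redundant vpshufd. Equivalent intrinsic form (function name illustrative):

    #include <immintrin.h>

    /* 0x4E = 0b01001110: destination lanes take source dwords [2,3,0,1],
       swapping the upper and lower quadwords of the vector. */
    static __m128i swap_qwords(__m128i v) {
        return _mm_shuffle_epi32(v, 0x4E);
    }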
|
D | vector-shuffle-256-v16.ll |
    11 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
    27 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
    46 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
    65 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
    84 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
    103 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
    122 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
    141 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
    163 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
    172 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
    [all …]
|
D | 2012-01-12-extract-sv.ll |
    8 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
    9 ; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
|
D | vector-idiv-sdiv-128.ll |
    124 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    125 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    128 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    139 ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    140 ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    143 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    234 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
    422 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    423 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    426 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    [all …]
|
D | vector-idiv-udiv-128.ll |
    119 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    120 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    123 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    134 ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    135 ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    138 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    221 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
    402 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    403 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    406 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    [all …]
|
D | vector-shuffle-256-v8.ll |
    910 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
    911 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,0]
    927 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
    928 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,0]
    944 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
    945 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
    1036 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
    1045 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
    1046 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
    1062 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
    [all …]
|
D | viabs.ll |
    453 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    456 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
    463 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    466 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
    476 ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,1,3,3,5,5,7,7]
    480 ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,1,3,3,5,5,7,7]
    530 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    533 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
    540 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
    543 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
    [all …]
|
D | avx-trunc.ll |
    8 ; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
    9 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
|
D | vector-trunc-math.ll |
    28 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
    29 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
    37 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
    103 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
    105 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
    241 ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,0,2,4,6,4,6]
    243 ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,0,2,4,6,4,6]
    251 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
    253 ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
    419 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    [all …]
|
D | vselect-avx.ll |
    47 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
    78 ; CHECK-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
    79 ; CHECK-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
    82 ; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
|
D | vector-shuffle-combining.ll |
    101 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
    179 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    197 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    215 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    233 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    251 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    269 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    547 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    554 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    590 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
    [all …]
|
D | vector-shuffle-128-v4.ll |
    20 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
    33 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
    46 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
    59 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,0,0]
    72 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
    85 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,0,0]
    98 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,0]
    111 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
    125 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,1]
    361 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
    [all …]
|
D | avx-splat.ll |
    19 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
    31 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
    104 ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,0]
|
D | avx-vbroadcast.ll |
    159 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,1,1]
    164 ; X64-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,1,1]
    261 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
    266 ; X64-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
    376 ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
    388 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
    393 ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
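Note: the mem[1,1,1,1] and mem[2,3,2,3] comments show the broadcast's load folded straight into vpshufd, the usual pre-AVX2 way to splat a dword or qword from memory. A small sketch of the same splat in intrinsics (pointer and function names hypothetical):

    #include <immintrin.h>

    /* Splat the second dword of a loaded vector; compilers may fold the
       load into vpshufd, which FileCheck then prints as mem[1,1,1,1]. */
    static __m128i splat_dword1(const int *p) {
        __m128i v = _mm_loadu_si128((const __m128i *)p);
        return _mm_shuffle_epi32(v, 0x55); /* v[1,1,1,1] */
    }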
|
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | compare_win.cc |
    91 vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes. in SumSquareError_AVX2()
    93 vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes. in SumSquareError_AVX2()
    202 vpshufd xmm2, xmm1, 0x0e // upper 2 dwords in HashDjb2_AVX2()
    204 vpshufd xmm2, xmm1, 0x01 in HashDjb2_AVX2()
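Note: these immediates implement a horizontal add: 0xEE ([2,3,2,3]) folds the upper two dwords onto the lower two, then 0x01 ([1,0,0,0]) folds lane 1 onto lane 0. The same reduction for a single xmm register, sketched in C (function name illustrative):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sum four u32 lanes to a scalar with the 0xEE / 0x01 shuffle-and-add
       pattern used by SumSquareError_AVX2. */
    static uint32_t hsum_epu32(__m128i v) {
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, 0xEE)); /* + v[2,3,2,3] */
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, 0x01)); /* + v[1,0,0,0] */
        return (uint32_t)_mm_cvtsi128_si32(v);
    }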
|
/external/libyuv/files/source/ |
D | compare_win.cc |
    91 vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes. in SumSquareError_AVX2()
    93 vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes. in SumSquareError_AVX2()
    202 vpshufd xmm2, xmm1, 0x0e // upper 2 dwords in HashDjb2_AVX2()
    204 vpshufd xmm2, xmm1, 0x01 in HashDjb2_AVX2()
|
/external/boringssl/mac-x86_64/crypto/chacha/ |
D | chacha-x86_64.S |
    1010 vpshufd $0x00,%ymm11,%ymm8
    1011 vpshufd $0x55,%ymm11,%ymm9
    1013 vpshufd $0xaa,%ymm11,%ymm10
    1015 vpshufd $0xff,%ymm11,%ymm11
    1019 vpshufd $0x00,%ymm3,%ymm0
    1020 vpshufd $0x55,%ymm3,%ymm1
    1022 vpshufd $0xaa,%ymm3,%ymm2
    1024 vpshufd $0xff,%ymm3,%ymm3
    1028 vpshufd $0x00,%ymm15,%ymm12
    1029 vpshufd $0x55,%ymm15,%ymm13
    [all …]
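Note: the four immediates $0x00/$0x55/$0xaa/$0xff replicate dword 0/1/2/3 within each 128-bit lane (0x55 = 0b01010101 picks element 1 four times, and so on); the ChaCha code uses this to splat each state word across its own register for the multi-block AVX2 path. Intrinsic equivalent (function name illustrative):

    #include <immintrin.h>

    /* Splat each of the four dwords of a row across its own register.
       vpshufd shuffles per 128-bit lane, hence the [... | 4,4,4,4] halves. */
    static void splat_row_epi32(__m256i row, __m256i out[4]) {
        out[0] = _mm256_shuffle_epi32(row, 0x00); /* row[0,0,0,0 | 4,4,4,4] */
        out[1] = _mm256_shuffle_epi32(row, 0x55); /* row[1,1,1,1 | 5,5,5,5] */
        out[2] = _mm256_shuffle_epi32(row, 0xAA); /* row[2,2,2,2 | 6,6,6,6] */
        out[3] = _mm256_shuffle_epi32(row, 0xFF); /* row[3,3,3,3 | 7,7,7,7] */
    }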
|
/external/boringssl/linux-x86_64/crypto/chacha/ |
D | chacha-x86_64.S |
    1011 vpshufd $0x00,%ymm11,%ymm8
    1012 vpshufd $0x55,%ymm11,%ymm9
    1014 vpshufd $0xaa,%ymm11,%ymm10
    1016 vpshufd $0xff,%ymm11,%ymm11
    1020 vpshufd $0x00,%ymm3,%ymm0
    1021 vpshufd $0x55,%ymm3,%ymm1
    1023 vpshufd $0xaa,%ymm3,%ymm2
    1025 vpshufd $0xff,%ymm3,%ymm3
    1029 vpshufd $0x00,%ymm15,%ymm12
    1030 vpshufd $0x55,%ymm15,%ymm13
    [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | avx-shuffle.ll |
    9 ; CHECK: vpshufd
|
D | avx-basic.ll |
    85 ; CHECK: vpshufd $-96
    86 ; CHECK: vpshufd $-6
|
/external/llvm/test/MC/X86/ |
D | shuffle-comments.s |
    53 vpshufd $27, %xmm0, %xmm1 label
    55 vpshufd $27, (%rax), %xmm1 label
    58 vpshufd $27, %ymm0, %ymm1 label
    60 vpshufd $27, (%rax), %ymm1 label
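Note: these MC tests pin down the disassembly comments themselves: $27 is 0b00011011, and reading two bits per destination lane from the low end gives [3,2,1,0], a full lane reversal. A minimal sketch of how such a comment can be decoded from the immediate (function name hypothetical, not LLVM's printer):

    #include <stdio.h>

    /* Each pair of immediate bits selects the source dword for one
       destination lane, lane 0 in the low bits; 27 prints as [3,2,1,0]. */
    static void print_pshufd_comment(unsigned imm) {
        printf("xmm1 = xmm0[%u,%u,%u,%u]\n",
               imm & 3, (imm >> 2) & 3, (imm >> 4) & 3, (imm >> 6) & 3);
    }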
|