/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
vector-reduce-add.ll
   23  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   30  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   51  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   61  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   71  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   98  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  109  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  121  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  158  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  171  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  [all …]
vector-reduce-and.ll
   23  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   30  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   61  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   71  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  107  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  119  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  163  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  176  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  201  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  203  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  [all …]
vector-reduce-or.ll
   23  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   30  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   61  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   71  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  107  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  119  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  163  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  176  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  201  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  203  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  [all …]
vector-reduce-xor.ll
   23  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   30  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   61  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   71  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  107  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  119  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  163  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  176  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  201  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  203  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  [all …]
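The same two-step pattern recurs across the vector-reduce-{add,and,or,xor}.ll checks above: one vpshufd swaps the 64-bit halves ([2,3,0,1], immediate 0x4E), a second folds the remaining pair ([1,1,2,3]). A minimal C intrinsics sketch of that v4i32 reduction, using add as the combining op (the helper name is illustrative, not from the tests):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch of the shuffle/op reduction the vector-reduce-*.ll checks
     * verify. _mm_shuffle_epi32 compiles to (v)pshufd. */
    static int32_t reduce_add_v4i32(__m128i v) {
        /* [2,3,0,1]: swap the high and low 64-bit halves, then combine. */
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, _MM_SHUFFLE(1, 0, 3, 2)));
        /* [1,1,2,3]: fold element 1 onto element 0. */
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 2, 1, 1)));
        return _mm_cvtsi128_si32(v); /* scalar result sits in lane 0 */
    }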
mulvi32.ll
   58  ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   59  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
  121  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
  122  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  124  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
  131  ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
  132  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  134  ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
  183  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
  184  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
  [all …]
vector-reduce-mul.ll
   34  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   48  ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   62  ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   77  ; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
   85  ; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  131  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  155  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  179  ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  203  ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  221  ; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  [all …]
vector-idiv.ll
   62  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
   66  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
   75  ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
   76  ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
   79  ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
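The [1,1,3,3] shuffles throughout the idiv tests feed (v)pmuludq, which only multiplies the even dwords; copying the odd dwords into even positions lets two multiplies produce all four 32-bit high products needed for division by a constant. A hedged sketch of that multiply-high building block (not the exact sequence LLVM emits; the helper name is made up):

    #include <immintrin.h>

    /* All four 32-bit unsigned high-multiply results via two pmuludq ops.
     * 0xF5 selects dwords [1,1,3,3], moving odd elements into even slots. */
    static __m128i mulhi_epu32(__m128i a, __m128i b) {
        __m128i p02 = _mm_mul_epu32(a, b);              /* 64-bit products of dwords 0, 2 */
        __m128i p13 = _mm_mul_epu32(_mm_shuffle_epi32(a, 0xF5),
                                    _mm_shuffle_epi32(b, 0xF5));
        /* 0x0D = [1,3,0,0]: pull each product's high dword into slots 0 and 1. */
        __m128i h02 = _mm_shuffle_epi32(p02, 0x0D);     /* {hi0, hi2, x, x} */
        __m128i h13 = _mm_shuffle_epi32(p13, 0x0D);     /* {hi1, hi3, x, x} */
        return _mm_unpacklo_epi32(h02, h13);            /* {hi0, hi1, hi2, hi3} */
    }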
vector-idiv-sdiv-256.ll
   93  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
   95  ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
   98  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
  104  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
  105  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
  108  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  120  ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
  121  ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
  124  ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
  170  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
  [all …]
vector-idiv-udiv-256.ll
  100  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
  101  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
  104  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
  111  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
  113  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
  116  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  128  ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
  129  ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
  132  ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
  181  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
  [all …]
vector-shuffle-256-v16.ll
   12  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
   28  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
   30  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
   37  ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
   50  ; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
   67  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
   69  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,0]
   76  ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,0]
   89  ; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,0]
  106  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
  [all …]
bitcast-int-to-vector-bool-sext.ll
   30  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
   70  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  111  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  212  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
  263  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  315  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  368  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  370  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
  388  ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  390  ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
  [all …]
known-bits-vector.ll
   54  ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
   64  ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
   80  ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
   87  ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
  100  ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
  107  ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
  121  ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
  128  ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
  425  ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
  432  ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
  [all …]
bitcast-int-to-vector-bool.ll
   26  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
   68  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  111  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  205  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  207  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
  230  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  232  ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
vector-compare-all_of.ll
  112  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  114  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  249  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  251  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  284  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  292  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  346  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  407  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  409  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  450  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  [all …]
vector-compare-any_of.ll
  108  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  110  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  233  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  235  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  268  ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  276  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  326  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  384  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  386  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
  423  ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
  [all …]
bitcast-int-to-vector-bool-zext.ll
   32  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
   84  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  137  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  268  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
  332  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  397  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
  467  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  469  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
  492  ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  494  ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
  [all …]
bitcast-and-setcc-128.ll
  247  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
  252  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
  258  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  263  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
  276  ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
  281  ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
  287  ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  292  ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
  400  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
  405  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
  [all …]
/external/llvm/test/CodeGen/X86/
vector-idiv.ll
   64  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
   65  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
   68  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
   77  ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
   78  ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
   81  ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
vector-idiv-sdiv-256.ll
   92  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
   94  ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
   97  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
  103  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
  104  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
  107  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  119  ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
  120  ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
  123  ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
  169  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
  [all …]
vector-trunc.ll
   43  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
   44  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
   47  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
   48  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   55  ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
   57  ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
  139  ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
  141  ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
  193  ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
  195  ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
  [all …]
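In vector-trunc.ll the [0,2,2,3] shuffles keep the even dwords, i.e. the low 32 bits of each 64-bit element. A rough C equivalent of that trunc-by-shuffle idea, assuming a v4i64 split across two xmm registers (helper name invented for illustration):

    #include <immintrin.h>

    /* Truncate four 64-bit elements (two xmm halves) to four 32-bit elements.
     * 0xE8 selects dwords [0,2,2,3]; only slots 0 and 1 of each result matter. */
    static __m128i trunc_v4i64_to_v4i32(__m128i lo, __m128i hi) {
        __m128i a = _mm_shuffle_epi32(lo, 0xE8); /* {lo0.low32, lo1.low32, x, x} */
        __m128i b = _mm_shuffle_epi32(hi, 0xE8); /* {hi0.low32, hi1.low32, x, x} */
        return _mm_unpacklo_epi64(a, b);
    }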
vector-idiv-udiv-256.ll
   99  ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
  100  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
  103  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
  110  ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
  112  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
  115  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  127  ; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
  128  ; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
  131  ; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
  180  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
  [all …]
swizzle-avx2.ll
   30  ; CHECK: vpshufd $78
   32  ; CHECK-NOT: vpshufd
   42  ; CHECK: vpshufd $78
   44  ; CHECK-NOT: vpshufd
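swizzle-avx2.ll matches the raw immediate rather than a shuffle comment: $78 (0x4E) is the encoded form of [2,3,0,1], since each 2-bit field of the vpshufd immediate picks a source dword. A tiny decoder makes the correspondence explicit:

    #include <stdio.h>

    /* Decode a (v)pshufd immediate: bits [2i+1:2i] give the source dword
     * for destination dword i. 78 = 0x4E decodes to [2,3,0,1]. */
    int main(void) {
        unsigned imm = 78;
        for (int i = 0; i < 4; i++)
            printf("dst[%d] = src[%u]\n", i, (imm >> (2 * i)) & 3u);
        return 0;
    }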
vector-shuffle-256-v16.ll
   11  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
   27  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
   46  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
   65  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
   84  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  103  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  122  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  141  ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
  163  ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
  172  ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
  [all …]
/external/libyuv/files/source/
compare_win.cc
   91  vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes. in SumSquareError_AVX2()
   93  vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes. in SumSquareError_AVX2()
  202  vpshufd xmm2, xmm1, 0x0e // upper 2 dwords in HashDjb2_AVX2()
  204  vpshufd xmm2, xmm1, 0x01 in HashDjb2_AVX2()
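The two vpshufd lines in SumSquareError_AVX2 fold an 8-dword accumulator down to one dword per 128-bit lane (0xEE adds dwords 3,2 onto 1,0; 0x01 adds dword 1 onto 0). A sketch of the same reduction with intrinsics; the final cross-lane add is my assumption about how the two lane sums are combined, not code taken from libyuv:

    #include <immintrin.h>
    #include <stdint.h>

    /* Horizontal sum of a ymm accumulator holding 8 uint32 partial sums. */
    static uint32_t sum_accumulator_avx2(__m256i acc) {
        acc = _mm256_add_epi32(acc, _mm256_shuffle_epi32(acc, 0xEE)); /* 3,2 + 1,0 both lanes */
        acc = _mm256_add_epi32(acc, _mm256_shuffle_epi32(acc, 0x01)); /* 1 + 0 both lanes */
        __m128i lo = _mm256_castsi256_si128(acc);       /* assumed recombination step */
        __m128i hi = _mm256_extracti128_si256(acc, 1);
        return (uint32_t)_mm_cvtsi128_si32(_mm_add_epi32(lo, hi));
    }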
/external/libvpx/libvpx/third_party/libyuv/source/
compare_win.cc
  110  vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes. in SumSquareError_AVX2()
  112  vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes. in SumSquareError_AVX2()
  221  vpshufd xmm2, xmm1, 0x0e // upper 2 dwords in HashDjb2_AVX2()
  223  vpshufd xmm2, xmm1, 0x01 in HashDjb2_AVX2()