
Searched refs:vmovddup (Results 1 – 25 of 105) sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/avx512-shuffles/
duplicate-low.ll
7 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
17 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 {%k1} = xmm0[0,0]
31 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
43 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 {%k1} = xmm0[0,0]
57 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
67 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
78 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
92 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
105 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
119 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
avx-vmovddup.ll
3 ; CHECK: vmovddup %ymm
9 ; CHECK: vmovddup (%
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
vector-shuffle-combining-avx.ll
41 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
46 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
55 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
60 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
226 ; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
231 ; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
240 ; X32-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
245 ; X64-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
296 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
301 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
[all …]
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
49 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
50 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
54 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
68 ; and then we fake it: use vmovddup to splat 64-bit value.
73 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
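The splat-for-size.ll comments above describe the trick: AVX1 has no integer broadcast, so a 64-bit integer splat is faked with vmovddup, and AVX2 still uses vmovddup because its encoding is one byte shorter than vpbroadcastq. A minimal sketch of the kind of IR that exercises this lowering (the function name and constant are illustrative, not taken from the test file):

  ; Sketch only: an optsize function whose 64-bit constant splat is expected,
  ; with AVX enabled, to be materialized via vmovddup from a single 64-bit
  ; constant-pool entry, along the lines of
  ;   vmovddup {{.*#+}} xmm1 = mem[0,0]
  ;   vpaddq   %xmm1, %xmm0, %xmm0
  define <2 x i64> @splat_add_v2i64(<2 x i64> %x) optsize {
    %add = add <2 x i64> %x, <i64 42, i64 42>
    ret <2 x i64> %add
  }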
sse3-intrinsics-fast-isel.ll
140 ; X86-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
150 ; X64-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
166 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
broadcast-elm-cross-splat-vec.ll
90 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
97 ; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
104 ; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
205 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
222 ; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
427 ; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
459 ; AVX-64-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
678 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
685 ; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
692 ; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
[all …]
sse3-schedule.ll
924 ; SANDY-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
925 ; SANDY-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [6:0.50]
938 ; HASWELL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
939 ; HASWELL-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:0.50]
952 ; BROADWELL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
953 ; BROADWELL-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:0.50]
966 ; SKYLAKE-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
967 ; SKYLAKE-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:0.50]
980 ; SKX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
981 ; SKX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:0.50]
[all …]
vector-shuffle-256-v4.ll
11 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
31 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
53 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
95 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
115 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
135 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
155 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
228 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
237 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
304 ; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
[all …]
haddsub-shuf.ll
39 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
104 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
269 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
343 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
417 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
491 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
avx-vbroadcast.ll
52 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
170 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
178 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
542 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
547 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
664 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
669 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
685 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
692 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
avx-splat.ll
45 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
138 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
avx2-vbroadcast.ll
231 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
470 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
521 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
526 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
574 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
579 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
897 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
902 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
vector-shuffle-128-v2.ll
132 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
188 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
304 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
1189 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
1220 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
1252 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
extractelement-load.ll
71 ; X64-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
/external/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
49 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
50 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
54 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
62 ; and then we fake it: use vmovddup to splat 64-bit value.
67 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
vector-shuffle-combining-avx.ll
34 ; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
42 ; ALL-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
141 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
149 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
186 ; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
204 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
avx512vl-intrinsics-fast-isel.ll
298 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
303 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
320 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
330 ; X64-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
350 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
360 ; X64-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
572 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
577 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
594 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
604 ; X64-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
[all …]
vector-shuffle-256-v4.ll
11 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
31 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
53 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
95 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
115 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
186 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
195 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
241 ; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
251 ; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm1[0,0,2,2]
252 ; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
[all …]
avx-splat.ll
45 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
138 ; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
avx-vbroadcast.ll
312 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
317 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
404 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
409 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
avx2-vbroadcast.ll
445 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
496 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
501 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
549 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
554 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
847 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
852 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
extractelement-load.ll
71 ; X64-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/X86/
AVX-32.s
3021 vmovddup -485498096(%edx,%eax,4), %xmm1
3025 vmovddup 485498096(%edx,%eax,4), %xmm1
3029 vmovddup -485498096(%edx,%eax,4), %ymm4
3033 vmovddup 485498096(%edx,%eax,4), %ymm4
3037 vmovddup 485498096(%edx), %xmm1
3041 vmovddup 485498096(%edx), %ymm4
3045 vmovddup 485498096, %xmm1
3049 vmovddup 485498096, %ymm4
3053 vmovddup 64(%edx,%eax), %xmm1
3057 vmovddup 64(%edx,%eax), %ymm4
[all …]
AVX-64.s
6405 vmovddup 485498096, %xmm15
6409 vmovddup 485498096, %xmm6
6413 vmovddup 485498096, %ymm7
6417 vmovddup 485498096, %ymm9
6421 vmovddup -64(%rdx,%rax,4), %xmm15
6425 vmovddup 64(%rdx,%rax,4), %xmm15
6429 vmovddup -64(%rdx,%rax,4), %xmm6
6433 vmovddup 64(%rdx,%rax,4), %xmm6
6437 vmovddup -64(%rdx,%rax,4), %ymm7
6441 vmovddup 64(%rdx,%rax,4), %ymm7
[all …]
/external/swiftshader/third_party/LLVM/test/MC/X86/
x86-32-avx.s
1524 vmovddup %xmm1, %xmm2
1528 vmovddup (%eax), %xmm2
3128 vmovddup %ymm2, %ymm5
3132 vmovddup (%eax), %ymm2
