; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=X64-SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

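; Extracting i32 element 2 from a loaded <2 x i64> bitcast to <4 x i32> should
; narrow to a single scalar load of the dword at offset 8, with no vector
; instructions, as the checks below verify.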
define i32 @t(<2 x i64>* %val) nounwind {
; X32-SSE2-LABEL: t:
; X32-SSE2:       # BB#0:
; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT:    movl 8(%eax), %eax
; X32-SSE2-NEXT:    retl
;
; X64-SSSE3-LABEL: t:
; X64-SSSE3:       # BB#0:
; X64-SSSE3-NEXT:    movl 8(%rdi), %eax
; X64-SSSE3-NEXT:    retq
;
; X64-AVX-LABEL: t:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    movl 8(%rdi), %eax
; X64-AVX-NEXT:    retq
  %tmp2 = load <2 x i64>, <2 x i64>* %val, align 16		; <<2 x i64>> [#uses=1]
  %tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32>		; <<4 x i32>> [#uses=1]
  %tmp4 = extractelement <4 x i32> %tmp3, i32 2		; <i32> [#uses=1]
  ret i32 %tmp4
}

; Case where extractelement of load ends up as undef.
; (Making sure this doesn't crash.)
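; Lane 0 of the shuffle mask is undef, so the extracted value is undef and the
; whole function folds down to a bare return on every target.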
define i32 @t2(<8 x i32>* %xp) {
; X32-SSE2-LABEL: t2:
; X32-SSE2:       # BB#0:
; X32-SSE2-NEXT:    retl
;
; X64-SSSE3-LABEL: t2:
; X64-SSSE3:       # BB#0:
; X64-SSSE3-NEXT:    retq
;
; X64-AVX-LABEL: t2:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    retq
  %x = load <8 x i32>, <8 x i32>* %xp
  %Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
  %y = extractelement <8 x i32> %Shuff68, i32 0
  ret i32 %y
}

; This case could easily end up inf-looping in the DAG combiner due to a
; low-alignment load of the vector, which prevents us from reliably forming a
; narrow load.

; The expected codegen is identical for the AVX case except that
; load/store instructions will have a leading 'v', so we don't
; need to special-case the checks.
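; Instead, SSE2 reloads the whole vector unaligned (movupd) and stores the
; high half with movhpd; SSSE3/AVX use (v)movddup and (v)movlpd, as the
; checks below show.
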
define void @t3() {
; X32-SSE2-LABEL: t3:
; X32-SSE2:       # BB#0: # %bb
; X32-SSE2-NEXT:    movupd (%eax), %xmm0
; X32-SSE2-NEXT:    movhpd %xmm0, (%eax)
;
; X64-SSSE3-LABEL: t3:
; X64-SSSE3:       # BB#0: # %bb
; X64-SSSE3-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
; X64-SSSE3-NEXT:    movlpd %xmm0, (%rax)
;
; X64-AVX-LABEL: t3:
; X64-AVX:       # BB#0: # %bb
; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-AVX-NEXT:    vmovlpd %xmm0, (%rax)
bb:
  %tmp13 = load <2 x double>, <2 x double>* undef, align 1
  %.sroa.3.24.vec.extract = extractelement <2 x double> %tmp13, i32 1
  store double %.sroa.3.24.vec.extract, double* undef, align 8
  unreachable
}

; Case where a load is unary shuffled, then bitcast (to a type with the same
; number of elements) before extractelement.
; This is testing for an assertion - the extraction was assuming that the undef
; second shuffle operand was a post-bitcast type instead of a pre-bitcast type.
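; On x86-64 the lane swap plus extract of element 1 reduces to a plain load of
; the original low 64 bits (a single movq); 32-bit x86 keeps the shuffles so
; that the i64 result lands in %eax/%edx.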
define i64 @t4(<2 x double>* %a) {
; X32-SSE2-LABEL: t4:
; X32-SSE2:       # BB#0:
; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT:    movapd (%eax), %xmm0
; X32-SSE2-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X32-SSE2-NEXT:    movd %xmm1, %eax
; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X32-SSE2-NEXT:    movd %xmm0, %edx
; X32-SSE2-NEXT:    retl
;
; X64-SSSE3-LABEL: t4:
; X64-SSSE3:       # BB#0:
; X64-SSSE3-NEXT:    movq (%rdi), %rax
; X64-SSSE3-NEXT:    retq
;
; X64-AVX-LABEL: t4:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    movq (%rdi), %rax
; X64-AVX-NEXT:    retq
  %b = load <2 x double>, <2 x double>* %a, align 16
  %c = shufflevector <2 x double> %b, <2 x double> %b, <2 x i32> <i32 1, i32 0>
  %d = bitcast <2 x double> %c to <2 x i64>
  %e = extractelement <2 x i64> %d, i32 1
  ret i64 %e
}