; NOTE: HTML code-viewer navigation chrome ("Home / Line# / Scopes# / Navigate /
; Raw / Download") was fused into this extracted file; removed as an artifact.
; rdar://7860110
; RUN: llc -asm-verbose=false < %s | FileCheck %s -check-prefix=X64
; RUN: llc -march=x86 -asm-verbose=false < %s | FileCheck %s -check-prefix=X32
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.2"

; Masking out the low byte of a loaded i32 and OR-ing in a zext i8 should be
; narrowed by codegen to a single byte store (movb), per the CHECK lines below.
define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -256     ; 0xFFFFFF00
  %C = zext i8 %a1 to i32
  %D = or i32 %C, %B
  store i32 %D, i32* %a0, align 4
  ret void

; X64: test1:
; X64: movb	%sil, (%rdi)

; X32: test1:
; X32: movb	8(%esp), %al
; X32: movb	%al, (%{{.*}})
}

; Same narrowing as test1, but the inserted byte is shifted to bit 8, so the
; byte store lands at offset 1 from the pointer.
define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65281    ; 0xFFFF00FF
  %C = zext i8 %a1 to i32
  %CS = shl i32 %C, 8
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test2:
; X64: movb	%sil, 1(%rdi)

; X32: test2:
; X32: movb	8(%esp), %al
; X32: movb	%al, 1(%{{.*}})
}

; 16-bit variant: replacing the low half of an i32 with a zext i16 should
; narrow to a single word store (movw).
define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65536    ; 0xFFFF0000
  %C = zext i16 %a1 to i32
  %D = or i32 %B, %C
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test3:
; X64: movw	%si, (%rdi)

; X32: test3:
; X32: movw	8(%esp), %ax
; X32: movw	%ax, (%{{.*}})
}

; 16-bit insert into the high half (shift by 16): expect a word store at
; offset 2 from the pointer.
define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, 65535    ; 0x0000FFFF
  %C = zext i16 %a1 to i32
  %CS = shl i32 %C, 16
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test4:
; X64: movw	%si, 2(%rdi)

; X32: test4:
; X32: movl	8(%esp), %eax
; X32: movw	%ax, 2(%{{.*}})
}

; i64 version of test4: inserting a zext i16 at bit 16 of an i64 should still
; narrow to a word store at offset 2.
define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -4294901761    ; 0xFFFFFFFF0000FFFF
  %C = zext i16 %a1 to i64
  %CS = shl i64 %C, 16
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test5:
; X64: movw	%si, 2(%rdi)

; X32: test5:
; X32: movzwl	8(%esp), %eax
; X32: movw	%ax, 2(%{{.*}})
}

; Byte insert at bit 40 of an i64: expect a single byte store at offset 5.
define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881    ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test6:
; X64: movb	%sil, 5(%rdi)


; X32: test6:
; X32: movb	8(%esp), %al
; X32: movb	%al, 5(%{{.*}})
}

; Same as test6 but with an unrelated load whose value is returned; the store
; narrowing must still happen (and on X32 a different scratch register is used
; since %eax holds the return value).
define i32 @test7(i64* nocapture %a0, i8 zeroext %a1, i32* %P2) nounwind {
entry:
  %OtherLoad = load i32 *%P2
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881    ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret i32 %OtherLoad
; X64: test7:
; X64: movb	%sil, 5(%rdi)


; X32: test7:
; X32: movb	8(%esp), %cl
; X32: movb	%cl, 5(%{{.*}})
}

; PR7833
; The intervening `store i32 0` clobbers the loaded value, so the load/or/store
; sequence must NOT be folded into a single memory `or` here (contrast test9).

@g_16 = internal global i32 -1

; X64: test8:
; X64-NEXT: movl _g_16(%rip), %eax
; X64-NEXT: movl $0, _g_16(%rip)
; X64-NEXT: orl  $1, %eax
; X64-NEXT: movl %eax, _g_16(%rip)
; X64-NEXT: ret
define void @test8() nounwind {
  %tmp = load i32* @g_16
  store i32 0, i32* @g_16
  %or = or i32 %tmp, 1
  store i32 %or, i32* @g_16
  ret void
}

; Without an intervening store, load/or/store of a global folds into a single
; byte-sized read-modify-write `orb` on memory.
; X64: test9:
; X64-NEXT: orb $1, _g_16(%rip)
; X64-NEXT: ret
define void @test9() nounwind {
  %tmp = load i32* @g_16
  %or = or i32 %tmp, 1
  store i32 %or, i32* @g_16
  ret void
}

; rdar://8494845 + PR8244
; sext-load + lshr 8 + trunc must not be mis-narrowed to a plain byte load;
; the sign-extended bits feed the shift result.
; X64: test10:
; X64-NEXT: movsbl	(%rdi), %eax
; X64-NEXT: shrl	$8, %eax
; X64-NEXT: ret
define i8 @test10(i8* %P) nounwind ssp {
entry:
  %tmp = load i8* %P, align 1
  %conv = sext i8 %tmp to i32
  %shr3 = lshr i32 %conv, 8
  %conv2 = trunc i32 %shr3 to i8
  ret i8 %conv2
}