; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -march=x86    | FileCheck %s -check-prefix=X86-32

; Use h registers. On x86-64, codegen doesn't support general allocation
; of h registers yet, due to x86 encoding complications.

define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
; X86-64: bar64:
; X86-64: shrq $8, %rdi
; X86-64: incb %dil

; See FIXME: on regclass GR8.
; It could be optimally transformed like; incb %ch; movb %ch, (%rdx)
; WIN64:  bar64:
; WIN64:  shrq $8, %rcx
; WIN64:  incb %cl

; X86-32: bar64:
; X86-32: incb %ah
; Stores (i8)(x >> 8) + 1 to *p; on x86-32 the add is expected to hit %ah.
  %t0 = lshr i64 %x, 8
  %t1 = trunc i64 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, i8* %p
  ret void
}

define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
; X86-64: bar32:
; X86-64: shrl $8, %edi
; X86-64: incb %dil

; WIN64:  bar32:
; WIN64:  shrl $8, %ecx
; WIN64:  incb %cl

; X86-32: bar32:
; X86-32: incb %ah
; Stores (i8)(x >> 8) + 1 to *p; on x86-32 the add is expected to hit %ah.
  %t0 = lshr i32 %x, 8
  %t1 = trunc i32 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, i8* %p
  ret void
}

define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
; X86-64: bar16:
; X86-64: shrl $8, %edi
; X86-64: incb %dil

; WIN64:  bar16:
; WIN64:  shrl $8, %ecx
; WIN64:  incb %cl

; X86-32: bar16:
; X86-32: incb %ah
; Stores (i8)(x >> 8) + 1 to *p; on x86-32 the add is expected to hit %ah.
  %t0 = lshr i16 %x, 8
  %t1 = trunc i16 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, i8* %p
  ret void
}

define i64 @qux64(i64 inreg %x) nounwind {
; X86-64: qux64:
; X86-64: movq %rdi, %rax
; X86-64: movzbl %ah, %eax

; WIN64:  qux64:
; WIN64:  movzbl %ch, %eax

; X86-32: qux64:
; X86-32: movzbl %ah, %eax
; Returns (x >> 8) & 0xff; expected to lower to a movzbl from an h register.
  %t0 = lshr i64 %x, 8
  %t1 = and i64 %t0, 255
  ret i64 %t1
}

define i32 @qux32(i32 inreg %x) nounwind {
; X86-64: qux32:
; X86-64: movl %edi, %eax
; X86-64: movzbl %ah, %eax

; WIN64:  qux32:
; WIN64:  movzbl %ch, %eax

; X86-32: qux32:
; X86-32: movzbl %ah, %eax
; Returns (x >> 8) & 0xff; expected to lower to a movzbl from an h register.
  %t0 = lshr i32 %x, 8
  %t1 = and i32 %t0, 255
  ret i32 %t1
}

define i16 @qux16(i16 inreg %x) nounwind {
; X86-64: qux16:
; X86-64: movl %edi, %eax
; X86-64: movzbl %ah, %eax

; WIN64:  qux16:
; WIN64:  movzbl %ch, %eax

; X86-32: qux16:
; X86-32: movzbl %ah, %eax
; Returns x >> 8 (no mask needed: the shift of an i16 leaves only the high byte);
; expected to lower to a movzbl from an h register.
  %t0 = lshr i16 %x, 8
  ret i16 %t0
}