; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64

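; Check codegen for loads and stores of the illegal integer types i24 and
; i56, e.g. as created for bitfield accesses.

; OR an immediate into an i24 loaded and stored with align 1. Both targets
; reassemble the value from a movzwl/movzbl pair, rewrite the high byte
; unchanged, and only store back the low word updated by the or.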
define void @i24_or(i24* %a) {
; X86-LABEL: i24_or:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzwl (%ecx), %edx
; X86-NEXT:    movzbl 2(%ecx), %eax
; X86-NEXT:    movb %al, 2(%ecx)
; X86-NEXT:    shll $16, %eax
; X86-NEXT:    orl %edx, %eax
; X86-NEXT:    orl $384, %eax # imm = 0x180
; X86-NEXT:    movw %ax, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: i24_or:
; X64:       # %bb.0:
; X64-NEXT:    movzwl (%rdi), %eax
; X64-NEXT:    movzbl 2(%rdi), %ecx
; X64-NEXT:    movb %cl, 2(%rdi)
; X64-NEXT:    shll $16, %ecx
; X64-NEXT:    orl %eax, %ecx
; X64-NEXT:    orl $384, %ecx # imm = 0x180
; X64-NEXT:    movw %cx, (%rdi)
; X64-NEXT:    retq
  %aa = load i24, i24* %a, align 1
  %b = or i24 %aa, 384
  store i24 %b, i24* %a, align 1
  ret void
}

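; Same reassembly as i24_or, plus a mask: (a & -128) | 384. The i24 constants
; are materialized as the 32-bit immediates 0x180 and 0xFFFF80.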
define void @i24_and_or(i24* %a) {
; X86-LABEL: i24_and_or:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzwl (%ecx), %edx
; X86-NEXT:    movzbl 2(%ecx), %eax
; X86-NEXT:    movb %al, 2(%ecx)
; X86-NEXT:    shll $16, %eax
; X86-NEXT:    orl %edx, %eax
; X86-NEXT:    orl $384, %eax # imm = 0x180
; X86-NEXT:    andl $16777088, %eax # imm = 0xFFFF80
; X86-NEXT:    movw %ax, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: i24_and_or:
; X64:       # %bb.0:
; X64-NEXT:    movzwl (%rdi), %eax
; X64-NEXT:    movzbl 2(%rdi), %ecx
; X64-NEXT:    movb %cl, 2(%rdi)
; X64-NEXT:    shll $16, %ecx
; X64-NEXT:    orl %eax, %ecx
; X64-NEXT:    orl $384, %ecx # imm = 0x180
; X64-NEXT:    andl $16777088, %ecx # imm = 0xFFFF80
; X64-NEXT:    movw %cx, (%rdi)
; X64-NEXT:    retq
  %b = load i24, i24* %a, align 1
  %c = and i24 %b, -128
  %d = or i24 %c, 384
  store i24 %d, i24* %a, align 1
  ret void
}

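; Insert a single bit at position 13 of an i24: %bit is zero-extended,
; shifted left by 13, and merged into the load under the mask
; 0xFFDFFF = ~(1 << 13).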
define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; X86-LABEL: i24_insert_bit:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %esi, -8
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movzwl (%ecx), %esi
; X86-NEXT:    movzbl 2(%ecx), %eax
; X86-NEXT:    movb %al, 2(%ecx)
; X86-NEXT:    shll $16, %eax
; X86-NEXT:    orl %esi, %eax
; X86-NEXT:    shll $13, %edx
; X86-NEXT:    andl $16769023, %eax # imm = 0xFFDFFF
; X86-NEXT:    orl %edx, %eax
; X86-NEXT:    movw %ax, (%ecx)
; X86-NEXT:    popl %esi
; X86-NEXT:    .cfi_def_cfa_offset 4
; X86-NEXT:    retl
;
; X64-LABEL: i24_insert_bit:
; X64:       # %bb.0:
; X64-NEXT:    movzwl (%rdi), %eax
; X64-NEXT:    movzbl 2(%rdi), %ecx
; X64-NEXT:    movb %cl, 2(%rdi)
; X64-NEXT:    shll $16, %ecx
; X64-NEXT:    orl %eax, %ecx
; X64-NEXT:    shll $13, %esi
; X64-NEXT:    andl $16769023, %ecx # imm = 0xFFDFFF
; X64-NEXT:    orl %esi, %ecx
; X64-NEXT:    movw %cx, (%rdi)
; X64-NEXT:    retq
  %extbit = zext i1 %bit to i24
  %b = load i24, i24* %a, align 1
  %extbit.shl = shl nuw nsw i24 %extbit, 13
  %c = and i24 %b, -8193
  %d = or i24 %c, %extbit.shl
  store i24 %d, i24* %a, align 1
  ret void
}

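; OR an immediate into an i56 loaded with align 1. X86 narrows this to a
; single 32-bit read-modify-write or, since only the low bits change; X64
; reassembles all seven bytes into a 64-bit register and stores the result
; back as a dword plus a word.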
define void @i56_or(i56* %a) {
; X86-LABEL: i56_or:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    orl $384, (%eax) # imm = 0x180
; X86-NEXT:    retl
;
; X64-LABEL: i56_or:
; X64:       # %bb.0:
; X64-NEXT:    movzwl 4(%rdi), %eax
; X64-NEXT:    movzbl 6(%rdi), %ecx
; X64-NEXT:    movb %cl, 6(%rdi)
; X64-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
; X64-NEXT:    shll $16, %ecx
; X64-NEXT:    orl %eax, %ecx
; X64-NEXT:    shlq $32, %rcx
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    orq %rcx, %rax
; X64-NEXT:    orq $384, %rax # imm = 0x180
; X64-NEXT:    movl %eax, (%rdi)
; X64-NEXT:    shrq $32, %rax
; X64-NEXT:    movw %ax, 4(%rdi)
; X64-NEXT:    retq
  %aa = load i56, i56* %a, align 1
  %b = or i56 %aa, 384
  store i56 %b, i56* %a, align 1
  ret void
}

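; (a & -128) | 384 on an i56. X86 again narrows the update to a 32-bit
; read-modify-write, which is safe because the mask keeps bits 32-55; X64
; applies the 56-bit mask 0xFFFFFFFFFFFF80 with a movabsq.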
define void @i56_and_or(i56* %a) {
; X86-LABEL: i56_and_or:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $384, %ecx # imm = 0x180
; X86-NEXT:    orl (%eax), %ecx
; X86-NEXT:    andl $-128, %ecx
; X86-NEXT:    movl %ecx, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: i56_and_or:
; X64:       # %bb.0:
; X64-NEXT:    movzwl 4(%rdi), %eax
; X64-NEXT:    movzbl 6(%rdi), %ecx
; X64-NEXT:    movb %cl, 6(%rdi)
; X64-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
; X64-NEXT:    shll $16, %ecx
; X64-NEXT:    orl %eax, %ecx
; X64-NEXT:    shlq $32, %rcx
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    orq %rcx, %rax
; X64-NEXT:    orq $384, %rax # imm = 0x180
; X64-NEXT:    movabsq $72057594037927808, %rcx # imm = 0xFFFFFFFFFFFF80
; X64-NEXT:    andq %rax, %rcx
; X64-NEXT:    movl %ecx, (%rdi)
; X64-NEXT:    shrq $32, %rcx
; X64-NEXT:    movw %cx, 4(%rdi)
; X64-NEXT:    retq
  %b = load i56, i56* %a, align 1
  %c = and i56 %b, -128
  %d = or i56 %c, 384
  store i56 %d, i56* %a, align 1
  ret void
}

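; Insert a single bit at position 13 of an i56. X86 narrows the whole update
; to the low dword; X64 rebuilds the full value and masks with
; 0xFFFFFFFFFFDFFF = ~(1 << 13) over 56 bits.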
define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
; X86-LABEL: i56_insert_bit:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll $13, %ecx
; X86-NEXT:    movl $-8193, %edx # imm = 0xDFFF
; X86-NEXT:    andl (%eax), %edx
; X86-NEXT:    orl %ecx, %edx
; X86-NEXT:    movl %edx, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: i56_insert_bit:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    movzwl 4(%rdi), %ecx
; X64-NEXT:    movzbl 6(%rdi), %edx
; X64-NEXT:    movb %dl, 6(%rdi)
; X64-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
; X64-NEXT:    shll $16, %edx
; X64-NEXT:    orl %ecx, %edx
; X64-NEXT:    shlq $32, %rdx
; X64-NEXT:    movl (%rdi), %ecx
; X64-NEXT:    orq %rdx, %rcx
; X64-NEXT:    shlq $13, %rax
; X64-NEXT:    movabsq $72057594037919743, %rdx # imm = 0xFFFFFFFFFFDFFF
; X64-NEXT:    andq %rcx, %rdx
; X64-NEXT:    orq %rax, %rdx
; X64-NEXT:    movl %edx, (%rdi)
; X64-NEXT:    shrq $32, %rdx
; X64-NEXT:    movw %dx, 4(%rdi)
; X64-NEXT:    retq
  %extbit = zext i1 %bit to i56
  %b = load i56, i56* %a, align 1
  %extbit.shl = shl nuw nsw i56 %extbit, 13
  %c = and i56 %b, -8193
  %d = or i56 %c, %extbit.shl
  store i56 %d, i56* %a, align 1
  ret void
}