; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu   < %s | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,X64

; https://bugs.llvm.org/show_bug.cgi?id=38149

; We are truncating from a wider width, and then sign-extending
; back to the original width. Then we inequality-compare the original
; value and the round-tripped result. If they don't match, then the
; truncation was signedness-changing (the value did not fit).

; This can be expressed in several ways in IR:
;   trunc + sext + icmp ne <- not canonical
;   shl   + ashr + icmp ne
;   add          + icmp ult/ule
;   add          + icmp uge/ugt
; However only the simplest form (with two shifts) gets lowered best.

; ---------------------------------------------------------------------------- ;
; shl + ashr + icmp ne
; ---------------------------------------------------------------------------- ;
22define i1 @shifts_necmp_i16_i8(i16 %x) nounwind {
23; X86-LABEL: shifts_necmp_i16_i8:
24; X86:       # %bb.0:
25; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
26; X86-NEXT:    movsbl %al, %ecx
27; X86-NEXT:    cmpw %ax, %cx
28; X86-NEXT:    setne %al
29; X86-NEXT:    retl
30;
31; X64-LABEL: shifts_necmp_i16_i8:
32; X64:       # %bb.0:
33; X64-NEXT:    movsbl %dil, %eax
34; X64-NEXT:    cmpw %di, %ax
35; X64-NEXT:    setne %al
36; X64-NEXT:    retq
37  %tmp0 = shl i16 %x, 8 ; 16-8
38  %tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
39  %tmp2 = icmp ne i16 %tmp1, %x
40  ret i1 %tmp2
41}
42
43define i1 @shifts_necmp_i32_i16(i32 %x) nounwind {
44; X86-LABEL: shifts_necmp_i32_i16:
45; X86:       # %bb.0:
46; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
47; X86-NEXT:    movswl %ax, %ecx
48; X86-NEXT:    cmpl %eax, %ecx
49; X86-NEXT:    setne %al
50; X86-NEXT:    retl
51;
52; X64-LABEL: shifts_necmp_i32_i16:
53; X64:       # %bb.0:
54; X64-NEXT:    movswl %di, %eax
55; X64-NEXT:    cmpl %edi, %eax
56; X64-NEXT:    setne %al
57; X64-NEXT:    retq
58  %tmp0 = shl i32 %x, 16 ; 32-16
59  %tmp1 = ashr exact i32 %tmp0, 16 ; 32-16
60  %tmp2 = icmp ne i32 %tmp1, %x
61  ret i1 %tmp2
62}
63
64define i1 @shifts_necmp_i32_i8(i32 %x) nounwind {
65; X86-LABEL: shifts_necmp_i32_i8:
66; X86:       # %bb.0:
67; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
68; X86-NEXT:    movsbl %al, %ecx
69; X86-NEXT:    cmpl %eax, %ecx
70; X86-NEXT:    setne %al
71; X86-NEXT:    retl
72;
73; X64-LABEL: shifts_necmp_i32_i8:
74; X64:       # %bb.0:
75; X64-NEXT:    movsbl %dil, %eax
76; X64-NEXT:    cmpl %edi, %eax
77; X64-NEXT:    setne %al
78; X64-NEXT:    retq
79  %tmp0 = shl i32 %x, 24 ; 32-8
80  %tmp1 = ashr exact i32 %tmp0, 24 ; 32-8
81  %tmp2 = icmp ne i32 %tmp1, %x
82  ret i1 %tmp2
83}
84
85define i1 @shifts_necmp_i64_i32(i64 %x) nounwind {
86; X86-LABEL: shifts_necmp_i64_i32:
87; X86:       # %bb.0:
88; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
89; X86-NEXT:    sarl $31, %eax
90; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
91; X86-NEXT:    setne %al
92; X86-NEXT:    retl
93;
94; X64-LABEL: shifts_necmp_i64_i32:
95; X64:       # %bb.0:
96; X64-NEXT:    movslq %edi, %rax
97; X64-NEXT:    cmpq %rdi, %rax
98; X64-NEXT:    setne %al
99; X64-NEXT:    retq
100  %tmp0 = shl i64 %x, 32 ; 64-32
101  %tmp1 = ashr exact i64 %tmp0, 32 ; 64-32
102  %tmp2 = icmp ne i64 %tmp1, %x
103  ret i1 %tmp2
104}
105
106define i1 @shifts_necmp_i64_i16(i64 %x) nounwind {
107; X86-LABEL: shifts_necmp_i64_i16:
108; X86:       # %bb.0:
109; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
110; X86-NEXT:    movswl %ax, %ecx
111; X86-NEXT:    movl %ecx, %edx
112; X86-NEXT:    sarl $31, %edx
113; X86-NEXT:    xorl %eax, %ecx
114; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
115; X86-NEXT:    orl %ecx, %edx
116; X86-NEXT:    setne %al
117; X86-NEXT:    retl
118;
119; X64-LABEL: shifts_necmp_i64_i16:
120; X64:       # %bb.0:
121; X64-NEXT:    movswq %di, %rax
122; X64-NEXT:    cmpq %rdi, %rax
123; X64-NEXT:    setne %al
124; X64-NEXT:    retq
125  %tmp0 = shl i64 %x, 48 ; 64-16
126  %tmp1 = ashr exact i64 %tmp0, 48 ; 64-16
127  %tmp2 = icmp ne i64 %tmp1, %x
128  ret i1 %tmp2
129}
130
131define i1 @shifts_necmp_i64_i8(i64 %x) nounwind {
132; X86-LABEL: shifts_necmp_i64_i8:
133; X86:       # %bb.0:
134; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
135; X86-NEXT:    movsbl %al, %ecx
136; X86-NEXT:    movl %ecx, %edx
137; X86-NEXT:    sarl $31, %edx
138; X86-NEXT:    xorl %eax, %ecx
139; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
140; X86-NEXT:    orl %ecx, %edx
141; X86-NEXT:    setne %al
142; X86-NEXT:    retl
143;
144; X64-LABEL: shifts_necmp_i64_i8:
145; X64:       # %bb.0:
146; X64-NEXT:    movsbq %dil, %rax
147; X64-NEXT:    cmpq %rdi, %rax
148; X64-NEXT:    setne %al
149; X64-NEXT:    retq
150  %tmp0 = shl i64 %x, 56 ; 64-8
151  %tmp1 = ashr exact i64 %tmp0, 56 ; 64-8
152  %tmp2 = icmp ne i64 %tmp1, %x
153  ret i1 %tmp2
154}

; ---------------------------------------------------------------------------- ;
; add + icmp ult
; ---------------------------------------------------------------------------- ;

160define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
161; X86-LABEL: add_ultcmp_i16_i8:
162; X86:       # %bb.0:
163; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
164; X86-NEXT:    movsbl %al, %ecx
165; X86-NEXT:    cmpw %ax, %cx
166; X86-NEXT:    setne %al
167; X86-NEXT:    retl
168;
169; X64-LABEL: add_ultcmp_i16_i8:
170; X64:       # %bb.0:
171; X64-NEXT:    movsbl %dil, %eax
172; X64-NEXT:    cmpw %di, %ax
173; X64-NEXT:    setne %al
174; X64-NEXT:    retq
175  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
176  %tmp1 = icmp ult i16 %tmp0, -256 ; ~0U << 8
177  ret i1 %tmp1
178}
179
180define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
181; X86-LABEL: add_ultcmp_i32_i16:
182; X86:       # %bb.0:
183; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
184; X86-NEXT:    movswl %ax, %ecx
185; X86-NEXT:    cmpl %eax, %ecx
186; X86-NEXT:    setne %al
187; X86-NEXT:    retl
188;
189; X64-LABEL: add_ultcmp_i32_i16:
190; X64:       # %bb.0:
191; X64-NEXT:    movswl %di, %eax
192; X64-NEXT:    cmpl %edi, %eax
193; X64-NEXT:    setne %al
194; X64-NEXT:    retq
195  %tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
196  %tmp1 = icmp ult i32 %tmp0, -65536 ; ~0U << 16
197  ret i1 %tmp1
198}
199
200define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
201; X86-LABEL: add_ultcmp_i32_i8:
202; X86:       # %bb.0:
203; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
204; X86-NEXT:    movsbl %al, %ecx
205; X86-NEXT:    cmpl %eax, %ecx
206; X86-NEXT:    setne %al
207; X86-NEXT:    retl
208;
209; X64-LABEL: add_ultcmp_i32_i8:
210; X64:       # %bb.0:
211; X64-NEXT:    movsbl %dil, %eax
212; X64-NEXT:    cmpl %edi, %eax
213; X64-NEXT:    setne %al
214; X64-NEXT:    retq
215  %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
216  %tmp1 = icmp ult i32 %tmp0, -256 ; ~0U << 8
217  ret i1 %tmp1
218}
219
220define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
221; X86-LABEL: add_ultcmp_i64_i32:
222; X86:       # %bb.0:
223; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
224; X86-NEXT:    sarl $31, %eax
225; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
226; X86-NEXT:    setne %al
227; X86-NEXT:    retl
228;
229; X64-LABEL: add_ultcmp_i64_i32:
230; X64:       # %bb.0:
231; X64-NEXT:    movslq %edi, %rax
232; X64-NEXT:    cmpq %rdi, %rax
233; X64-NEXT:    setne %al
234; X64-NEXT:    retq
235  %tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
236  %tmp1 = icmp ult i64 %tmp0, -4294967296 ; ~0U << 32
237  ret i1 %tmp1
238}
239
240define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
241; X86-LABEL: add_ultcmp_i64_i16:
242; X86:       # %bb.0:
243; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
244; X86-NEXT:    movswl %ax, %ecx
245; X86-NEXT:    xorl %ecx, %eax
246; X86-NEXT:    sarl $31, %ecx
247; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
248; X86-NEXT:    orl %eax, %ecx
249; X86-NEXT:    setne %al
250; X86-NEXT:    retl
251;
252; X64-LABEL: add_ultcmp_i64_i16:
253; X64:       # %bb.0:
254; X64-NEXT:    movswq %di, %rax
255; X64-NEXT:    cmpq %rdi, %rax
256; X64-NEXT:    setne %al
257; X64-NEXT:    retq
258  %tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
259  %tmp1 = icmp ult i64 %tmp0, -65536 ; ~0U << 16
260  ret i1 %tmp1
261}
262
263define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
264; X86-LABEL: add_ultcmp_i64_i8:
265; X86:       # %bb.0:
266; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
267; X86-NEXT:    movsbl %al, %ecx
268; X86-NEXT:    xorl %ecx, %eax
269; X86-NEXT:    sarl $31, %ecx
270; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
271; X86-NEXT:    orl %eax, %ecx
272; X86-NEXT:    setne %al
273; X86-NEXT:    retl
274;
275; X64-LABEL: add_ultcmp_i64_i8:
276; X64:       # %bb.0:
277; X64-NEXT:    movsbq %dil, %rax
278; X64-NEXT:    cmpq %rdi, %rax
279; X64-NEXT:    setne %al
280; X64-NEXT:    retq
281  %tmp0 = add i64 %x, -128 ; ~0U << (8-1)
282  %tmp1 = icmp ult i64 %tmp0, -256 ; ~0U << 8
283  ret i1 %tmp1
284}
285
286; Slightly more canonical variant
287define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
288; X86-LABEL: add_ulecmp_i16_i8:
289; X86:       # %bb.0:
290; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
291; X86-NEXT:    movsbl %al, %ecx
292; X86-NEXT:    cmpw %ax, %cx
293; X86-NEXT:    setne %al
294; X86-NEXT:    retl
295;
296; X64-LABEL: add_ulecmp_i16_i8:
297; X64:       # %bb.0:
298; X64-NEXT:    movsbl %dil, %eax
299; X64-NEXT:    cmpw %di, %ax
300; X64-NEXT:    setne %al
301; X64-NEXT:    retq
302  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
303  %tmp1 = icmp ule i16 %tmp0, -257 ; ~0U << 8 - 1
304  ret i1 %tmp1
305}

; ---------------------------------------------------------------------------- ;
; add + icmp uge
; ---------------------------------------------------------------------------- ;

311define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
312; X86-LABEL: add_ugecmp_i16_i8:
313; X86:       # %bb.0:
314; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
315; X86-NEXT:    movsbl %al, %ecx
316; X86-NEXT:    cmpw %ax, %cx
317; X86-NEXT:    setne %al
318; X86-NEXT:    retl
319;
320; X64-LABEL: add_ugecmp_i16_i8:
321; X64:       # %bb.0:
322; X64-NEXT:    movsbl %dil, %eax
323; X64-NEXT:    cmpw %di, %ax
324; X64-NEXT:    setne %al
325; X64-NEXT:    retq
326  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
327  %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
328  ret i1 %tmp1
329}
330
331define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
332; X86-LABEL: add_ugecmp_i32_i16:
333; X86:       # %bb.0:
334; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
335; X86-NEXT:    movswl %ax, %ecx
336; X86-NEXT:    cmpl %eax, %ecx
337; X86-NEXT:    setne %al
338; X86-NEXT:    retl
339;
340; X64-LABEL: add_ugecmp_i32_i16:
341; X64:       # %bb.0:
342; X64-NEXT:    movswl %di, %eax
343; X64-NEXT:    cmpl %edi, %eax
344; X64-NEXT:    setne %al
345; X64-NEXT:    retq
346  %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
347  %tmp1 = icmp uge i32 %tmp0, 65536 ; 1U << 16
348  ret i1 %tmp1
349}
350
351define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
352; X86-LABEL: add_ugecmp_i32_i8:
353; X86:       # %bb.0:
354; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
355; X86-NEXT:    movsbl %al, %ecx
356; X86-NEXT:    cmpl %eax, %ecx
357; X86-NEXT:    setne %al
358; X86-NEXT:    retl
359;
360; X64-LABEL: add_ugecmp_i32_i8:
361; X64:       # %bb.0:
362; X64-NEXT:    movsbl %dil, %eax
363; X64-NEXT:    cmpl %edi, %eax
364; X64-NEXT:    setne %al
365; X64-NEXT:    retq
366  %tmp0 = add i32 %x, 128 ; 1U << (8-1)
367  %tmp1 = icmp uge i32 %tmp0, 256 ; 1U << 8
368  ret i1 %tmp1
369}
370
371define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
372; X86-LABEL: add_ugecmp_i64_i32:
373; X86:       # %bb.0:
374; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
375; X86-NEXT:    sarl $31, %eax
376; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
377; X86-NEXT:    setne %al
378; X86-NEXT:    retl
379;
380; X64-LABEL: add_ugecmp_i64_i32:
381; X64:       # %bb.0:
382; X64-NEXT:    movslq %edi, %rax
383; X64-NEXT:    cmpq %rdi, %rax
384; X64-NEXT:    setne %al
385; X64-NEXT:    retq
386  %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
387  %tmp1 = icmp uge i64 %tmp0, 4294967296 ; 1U << 32
388  ret i1 %tmp1
389}
390
391define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
392; X86-LABEL: add_ugecmp_i64_i16:
393; X86:       # %bb.0:
394; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
395; X86-NEXT:    movswl %ax, %ecx
396; X86-NEXT:    xorl %ecx, %eax
397; X86-NEXT:    sarl $31, %ecx
398; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
399; X86-NEXT:    orl %eax, %ecx
400; X86-NEXT:    setne %al
401; X86-NEXT:    retl
402;
403; X64-LABEL: add_ugecmp_i64_i16:
404; X64:       # %bb.0:
405; X64-NEXT:    movswq %di, %rax
406; X64-NEXT:    cmpq %rdi, %rax
407; X64-NEXT:    setne %al
408; X64-NEXT:    retq
409  %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
410  %tmp1 = icmp uge i64 %tmp0, 65536 ; 1U << 16
411  ret i1 %tmp1
412}
413
414define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
415; X86-LABEL: add_ugecmp_i64_i8:
416; X86:       # %bb.0:
417; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
418; X86-NEXT:    movsbl %al, %ecx
419; X86-NEXT:    xorl %ecx, %eax
420; X86-NEXT:    sarl $31, %ecx
421; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
422; X86-NEXT:    orl %eax, %ecx
423; X86-NEXT:    setne %al
424; X86-NEXT:    retl
425;
426; X64-LABEL: add_ugecmp_i64_i8:
427; X64:       # %bb.0:
428; X64-NEXT:    movsbq %dil, %rax
429; X64-NEXT:    cmpq %rdi, %rax
430; X64-NEXT:    setne %al
431; X64-NEXT:    retq
432  %tmp0 = add i64 %x, 128 ; 1U << (8-1)
433  %tmp1 = icmp uge i64 %tmp0, 256 ; 1U << 8
434  ret i1 %tmp1
435}
436
437; Slightly more canonical variant
438define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
439; X86-LABEL: add_ugtcmp_i16_i8:
440; X86:       # %bb.0:
441; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
442; X86-NEXT:    movsbl %al, %ecx
443; X86-NEXT:    cmpw %ax, %cx
444; X86-NEXT:    setne %al
445; X86-NEXT:    retl
446;
447; X64-LABEL: add_ugtcmp_i16_i8:
448; X64:       # %bb.0:
449; X64-NEXT:    movsbl %dil, %eax
450; X64-NEXT:    cmpw %di, %ax
451; X64-NEXT:    setne %al
452; X64-NEXT:    retq
453  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
454  %tmp1 = icmp ugt i16 %tmp0, 255 ; (1U << 8) - 1
455  ret i1 %tmp1
456}

; Negative tests
; ---------------------------------------------------------------------------- ;

461; Adding not a constant
462define i1 @add_ugecmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
463; X86-LABEL: add_ugecmp_bad_i16_i8_add:
464; X86:       # %bb.0:
465; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
466; X86-NEXT:    addw {{[0-9]+}}(%esp), %ax
467; X86-NEXT:    movzwl %ax, %eax
468; X86-NEXT:    cmpl $255, %eax
469; X86-NEXT:    seta %al
470; X86-NEXT:    retl
471;
472; X64-LABEL: add_ugecmp_bad_i16_i8_add:
473; X64:       # %bb.0:
474; X64-NEXT:    addl %esi, %edi
475; X64-NEXT:    movzwl %di, %eax
476; X64-NEXT:    cmpl $255, %eax
477; X64-NEXT:    seta %al
478; X64-NEXT:    retq
479  %tmp0 = add i16 %x, %y
480  %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
481  ret i1 %tmp1
482}
483
484; Comparing not with a constant
485define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
486; X86-LABEL: add_ugecmp_bad_i16_i8_cmp:
487; X86:       # %bb.0:
488; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
489; X86-NEXT:    subl $-128, %eax
490; X86-NEXT:    cmpw {{[0-9]+}}(%esp), %ax
491; X86-NEXT:    setae %al
492; X86-NEXT:    retl
493;
494; X64-LABEL: add_ugecmp_bad_i16_i8_cmp:
495; X64:       # %bb.0:
496; X64-NEXT:    subl $-128, %edi
497; X64-NEXT:    cmpw %si, %di
498; X64-NEXT:    setae %al
499; X64-NEXT:    retq
500  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
501  %tmp1 = icmp uge i16 %tmp0, %y
502  ret i1 %tmp1
503}
504
505; Second constant is not larger than the first one
506define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
507; X86-LABEL: add_ugecmp_bad_i8_i16:
508; X86:       # %bb.0:
509; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
510; X86-NEXT:    subl $-128, %eax
511; X86-NEXT:    cmpw $127, %ax
512; X86-NEXT:    seta %al
513; X86-NEXT:    retl
514;
515; X64-LABEL: add_ugecmp_bad_i8_i16:
516; X64:       # %bb.0:
517; X64-NEXT:    subl $-128, %edi
518; X64-NEXT:    cmpw $127, %di
519; X64-NEXT:    seta %al
520; X64-NEXT:    retq
521  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
522  %tmp1 = icmp uge i16 %tmp0, 128 ; 1U << (8-1)
523  ret i1 %tmp1
524}
525
526; First constant is not power of two
527define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
528; X86-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
529; X86:       # %bb.0:
530; X86-NEXT:    movl $192, %eax
531; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
532; X86-NEXT:    movzwl %ax, %eax
533; X86-NEXT:    cmpl $255, %eax
534; X86-NEXT:    seta %al
535; X86-NEXT:    retl
536;
537; X64-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
538; X64:       # %bb.0:
539; X64-NEXT:    addl $192, %edi
540; X64-NEXT:    movzwl %di, %eax
541; X64-NEXT:    cmpl $255, %eax
542; X64-NEXT:    seta %al
543; X64-NEXT:    retq
544  %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
545  %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
546  ret i1 %tmp1
547}
548
549; Second constant is not power of two
550define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
551; X86-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
552; X86:       # %bb.0:
553; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
554; X86-NEXT:    subl $-128, %eax
555; X86-NEXT:    movzwl %ax, %eax
556; X86-NEXT:    cmpl $767, %eax # imm = 0x2FF
557; X86-NEXT:    seta %al
558; X86-NEXT:    retl
559;
560; X64-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
561; X64:       # %bb.0:
562; X64-NEXT:    subl $-128, %edi
563; X64-NEXT:    movzwl %di, %eax
564; X64-NEXT:    cmpl $767, %eax # imm = 0x2FF
565; X64-NEXT:    seta %al
566; X64-NEXT:    retq
567  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
568  %tmp1 = icmp uge i16 %tmp0, 768 ; (1U << 8)) + (1U << (8+1))
569  ret i1 %tmp1
570}
571
572; Magic check fails, 64 << 1 != 256
573define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
574; X86-LABEL: add_ugecmp_bad_i16_i8_magic:
575; X86:       # %bb.0:
576; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
577; X86-NEXT:    addl $64, %eax
578; X86-NEXT:    movzwl %ax, %eax
579; X86-NEXT:    cmpl $255, %eax
580; X86-NEXT:    seta %al
581; X86-NEXT:    retl
582;
583; X64-LABEL: add_ugecmp_bad_i16_i8_magic:
584; X64:       # %bb.0:
585; X64-NEXT:    addl $64, %edi
586; X64-NEXT:    movzwl %di, %eax
587; X64-NEXT:    cmpl $255, %eax
588; X64-NEXT:    seta %al
589; X64-NEXT:    retq
590  %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
591  %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
592  ret i1 %tmp1
593}
594
595; Bad 'destination type'
596define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
597; X86-LABEL: add_ugecmp_bad_i16_i4:
598; X86:       # %bb.0:
599; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
600; X86-NEXT:    addl $8, %eax
601; X86-NEXT:    cmpw $15, %ax
602; X86-NEXT:    seta %al
603; X86-NEXT:    retl
604;
605; X64-LABEL: add_ugecmp_bad_i16_i4:
606; X64:       # %bb.0:
607; X64-NEXT:    addl $8, %edi
608; X64-NEXT:    cmpw $15, %di
609; X64-NEXT:    seta %al
610; X64-NEXT:    retq
611  %tmp0 = add i16 %x, 8 ; 1U << (4-1)
612  %tmp1 = icmp uge i16 %tmp0, 16 ; 1U << 4
613  ret i1 %tmp1
614}
615
616; Bad storage type
617define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
618; X86-LABEL: add_ugecmp_bad_i24_i8:
619; X86:       # %bb.0:
620; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
621; X86-NEXT:    subl $-128, %eax
622; X86-NEXT:    andl $16777215, %eax # imm = 0xFFFFFF
623; X86-NEXT:    cmpl $255, %eax
624; X86-NEXT:    seta %al
625; X86-NEXT:    retl
626;
627; X64-LABEL: add_ugecmp_bad_i24_i8:
628; X64:       # %bb.0:
629; X64-NEXT:    subl $-128, %edi
630; X64-NEXT:    andl $16777215, %edi # imm = 0xFFFFFF
631; X64-NEXT:    cmpl $255, %edi
632; X64-NEXT:    seta %al
633; X64-NEXT:    retq
634  %tmp0 = add i24 %x, 128 ; 1U << (8-1)
635  %tmp1 = icmp uge i24 %tmp0, 256 ; 1U << 8
636  ret i1 %tmp1
637}
638
639; Slightly more canonical variant
640define i1 @add_ugtcmp_bad_i16_i8(i16 %x) nounwind {
641; CHECK-LABEL: add_ugtcmp_bad_i16_i8:
642; CHECK:       # %bb.0:
643; CHECK-NEXT:    xorl %eax, %eax
644; CHECK-NEXT:    ret{{[l|q]}}
645  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
646  %tmp1 = icmp ugt i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
647  ret i1 %tmp1
648}