; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefix=X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=X64

; Fold
;   ~(X - 1)
; To
;   - X
;
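; A quick sanity check of the fold, using the two's complement identity
; ~Y == -Y - 1:
;   ~(X - 1) == -(X - 1) - 1 == -X + 1 - 1 == -X
;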
; This needs to be a backend-level fold because only by then are pointers
; just registers; in middle-end IR this can only be done via the
; @llvm.ptrmask() intrinsic, which is not yet sufficiently widespread.
;
; https://bugs.llvm.org/show_bug.cgi?id=44448

; The basic positive tests

define i32 @t0_32(i32 %alignment) nounwind {
; X86-LABEL: t0_32:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: t0_32:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    negl %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %invmask = xor i32 %mask, -1
  ret i32 %invmask
}
define i64 @t1_64(i64 %alignment) nounwind {
; X86-LABEL: t1_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    retl
;
; X64-LABEL: t1_64:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    negq %rax
; X64-NEXT:    retq
  %mask = add i64 %alignment, -1
  %invmask = xor i64 %mask, -1
  ret i64 %invmask
}

; Extra use test
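;
; Even when the decremented mask (X - 1) has an extra use and must be
; materialized for the store, the returned value is still just a negate of X.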

define i32 @t2_extrause(i32 %alignment, i32* %mask_storage) nounwind {
; X86-LABEL: t2_extrause:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    leal -1(%eax), %edx
; X86-NEXT:    movl %edx, (%ecx)
; X86-NEXT:    negl %eax
; X86-NEXT:    retl
;
; X64-LABEL: t2_extrause:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    leal -1(%rax), %ecx
; X64-NEXT:    movl %ecx, (%rsi)
; X64-NEXT:    negl %eax
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  store i32 %mask, i32* %mask_storage
  %invmask = xor i32 %mask, -1
  ret i32 %invmask
}

; Negative tests
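;
; The fold only applies when both the add constant and the xor constant are -1;
; each test below replaces one of them with 1, so the combine must not fire.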

define i32 @n3_not_dec(i32 %alignment) nounwind {
; X86-LABEL: n3_not_dec:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    incl %eax
; X86-NEXT:    notl %eax
; X86-NEXT:    retl
;
; X64-LABEL: n3_not_dec:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal 1(%rdi), %eax
; X64-NEXT:    notl %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, 1 ; not -1
  %invmask = xor i32 %mask, -1
  ret i32 %invmask
}

define i32 @n4_not_not(i32 %alignment) nounwind {
; X86-LABEL: n4_not_not:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    decl %eax
; X86-NEXT:    xorl $1, %eax
; X86-NEXT:    retl
;
; X64-LABEL: n4_not_not:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal -1(%rdi), %eax
; X64-NEXT:    xorl $1, %eax
; X64-NEXT:    retq
  %mask = add i32 %alignment, -1
  %invmask = xor i32 %mask, 1 ; not -1
  ret i32 %invmask
}