; RUN: opt -S %s -atomic-expand | FileCheck %s

;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
;; instructions are not available.

;;; NOTE: this test is mostly target-independent -- any target which
;;; doesn't support cmpxchg of sub-word sizes would do.
target datalayout = "E-m:e-i64:64-n32:64-S128"
target triple = "sparcv9-unknown-unknown"

;; An i8 cmpxchg is expanded into a loop doing a full-word (i32) cmpxchg on the
;; containing aligned word, masking/shifting the i8 operands into position.
; CHECK-LABEL: @test_cmpxchg_i8(
; CHECK:  fence seq_cst
; CHECK:  %0 = ptrtoint i8* %arg to i64
; CHECK:  %1 = and i64 %0, -4
; CHECK:  %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK:  %PtrLSB = and i64 %0, 3
; CHECK:  %2 = xor i64 %PtrLSB, 3
; CHECK:  %3 = shl i64 %2, 3
; CHECK:  %ShiftAmt = trunc i64 %3 to i32
; CHECK:  %Mask = shl i32 255, %ShiftAmt
; CHECK:  %Inv_Mask = xor i32 %Mask, -1
; CHECK:  %4 = zext i8 %new to i32
; CHECK:  %5 = shl i32 %4, %ShiftAmt
; CHECK:  %6 = zext i8 %old to i32
; CHECK:  %7 = shl i32 %6, %ShiftAmt
; CHECK:  %8 = load i32, i32* %AlignedAddr
; CHECK:  %9 = and i32 %8, %Inv_Mask
; CHECK:  br label %partword.cmpxchg.loop
; CHECK:partword.cmpxchg.loop:
; CHECK:  %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
; CHECK:  %11 = or i32 %10, %5
; CHECK:  %12 = or i32 %10, %7
; CHECK:  %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
; CHECK:  %14 = extractvalue { i32, i1 } %13, 0
; CHECK:  %15 = extractvalue { i32, i1 } %13, 1
; CHECK:  br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
; CHECK:partword.cmpxchg.failure:
; CHECK:  %16 = and i32 %14, %Inv_Mask
; CHECK:  %17 = icmp ne i32 %10, %16
; CHECK:  br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
; CHECK:partword.cmpxchg.end:
; CHECK:  %shifted = lshr i32 %14, %ShiftAmt
; CHECK:  %extracted = trunc i32 %shifted to i8
; CHECK:  %18 = insertvalue { i8, i1 } undef, i8 %extracted, 0
; CHECK:  %19 = insertvalue { i8, i1 } %18, i1 %15, 1
; CHECK:  fence seq_cst
; CHECK:  %ret = extractvalue { i8, i1 } %19, 0
; CHECK:  ret i8 %ret
define i8 @test_cmpxchg_i8(i8* %arg, i8 %old, i8 %new) {
entry:
  %ret_succ = cmpxchg i8* %arg, i8 %old, i8 %new seq_cst monotonic
  %ret = extractvalue { i8, i1 } %ret_succ, 0
  ret i8 %ret
}

;; Same expansion as the i8 case, but with a 16-bit mask (65535) and an
;; alignment-offset xor of 2 instead of 3.
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK:  fence seq_cst
; CHECK:  %0 = ptrtoint i16* %arg to i64
; CHECK:  %1 = and i64 %0, -4
; CHECK:  %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK:  %PtrLSB = and i64 %0, 3
; CHECK:  %2 = xor i64 %PtrLSB, 2
; CHECK:  %3 = shl i64 %2, 3
; CHECK:  %ShiftAmt = trunc i64 %3 to i32
; CHECK:  %Mask = shl i32 65535, %ShiftAmt
; CHECK:  %Inv_Mask = xor i32 %Mask, -1
; CHECK:  %4 = zext i16 %new to i32
; CHECK:  %5 = shl i32 %4, %ShiftAmt
; CHECK:  %6 = zext i16 %old to i32
; CHECK:  %7 = shl i32 %6, %ShiftAmt
; CHECK:  %8 = load i32, i32* %AlignedAddr
; CHECK:  %9 = and i32 %8, %Inv_Mask
; CHECK:  br label %partword.cmpxchg.loop
; CHECK:partword.cmpxchg.loop:
; CHECK:  %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
; CHECK:  %11 = or i32 %10, %5
; CHECK:  %12 = or i32 %10, %7
; CHECK:  %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
; CHECK:  %14 = extractvalue { i32, i1 } %13, 0
; CHECK:  %15 = extractvalue { i32, i1 } %13, 1
; CHECK:  br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
; CHECK:partword.cmpxchg.failure:
; CHECK:  %16 = and i32 %14, %Inv_Mask
; CHECK:  %17 = icmp ne i32 %10, %16
; CHECK:  br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
; CHECK:partword.cmpxchg.end:
; CHECK:  %shifted = lshr i32 %14, %ShiftAmt
; CHECK:  %extracted = trunc i32 %shifted to i16
; CHECK:  %18 = insertvalue { i16, i1 } undef, i16 %extracted, 0
; CHECK:  %19 = insertvalue { i16, i1 } %18, i1 %15, 1
; CHECK:  fence seq_cst
; CHECK:  %ret = extractvalue { i16, i1 } %19, 0
; CHECK:  ret i16 %ret
define i16 @test_cmpxchg_i16(i16* %arg, i16 %old, i16 %new) {
entry:
  %ret_succ = cmpxchg i16* %arg, i16 %old, i16 %new seq_cst monotonic
  %ret = extractvalue { i16, i1 } %ret_succ, 0
  ret i16 %ret
}


;; atomicrmw add on i16 expands to a load + cmpxchg loop on the aligned i32,
;; with the addend pre-shifted into position and the result re-masked.
; CHECK-LABEL: @test_add_i16(
; CHECK:  fence seq_cst
; CHECK:  %0 = ptrtoint i16* %arg to i64
; CHECK:  %1 = and i64 %0, -4
; CHECK:  %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK:  %PtrLSB = and i64 %0, 3
; CHECK:  %2 = xor i64 %PtrLSB, 2
; CHECK:  %3 = shl i64 %2, 3
; CHECK:  %ShiftAmt = trunc i64 %3 to i32
; CHECK:  %Mask = shl i32 65535, %ShiftAmt
; CHECK:  %Inv_Mask = xor i32 %Mask, -1
; CHECK:  %4 = zext i16 %val to i32
; CHECK:  %ValOperand_Shifted = shl i32 %4, %ShiftAmt
; CHECK:  %5 = load i32, i32* %AlignedAddr, align 4
; CHECK:  br label %atomicrmw.start
; CHECK:atomicrmw.start:
; CHECK:  %loaded = phi i32 [ %5, %entry ], [ %newloaded, %atomicrmw.start ]
; CHECK:  %new = add i32 %loaded, %ValOperand_Shifted
; CHECK:  %6 = and i32 %new, %Mask
; CHECK:  %7 = and i32 %loaded, %Inv_Mask
; CHECK:  %8 = or i32 %7, %6
; CHECK:  %9 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %8 monotonic monotonic
; CHECK:  %success = extractvalue { i32, i1 } %9, 1
; CHECK:  %newloaded = extractvalue { i32, i1 } %9, 0
; CHECK:  br i1 %success, label %atomicrmw.end, label %atomicrmw.start
; CHECK:atomicrmw.end:
; CHECK:  %shifted = lshr i32 %newloaded, %ShiftAmt
; CHECK:  %extracted = trunc i32 %shifted to i16
; CHECK:  fence seq_cst
; CHECK:  ret i16 %extracted
define i16 @test_add_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw add i16* %arg, i16 %val seq_cst
  ret i16 %ret
}

;; xor needs no re-masking of the result, so the cmpxchg uses %new directly.
; CHECK-LABEL: @test_xor_i16(
; (I'm going to just assert on the bits that differ from add, above.)
; CHECK:atomicrmw.start:
; CHECK:  %new = xor i32 %loaded, %ValOperand_Shifted
; CHECK:  %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_xor_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw xor i16* %arg, i16 %val seq_cst
  ret i16 %ret
}

;; or, like xor, cannot disturb bits outside the mask, so no re-masking.
; CHECK-LABEL: @test_or_i16(
; (I'm going to just assert on the bits that differ from add, above.)
; CHECK:atomicrmw.start:
; CHECK:  %new = or i32 %loaded, %ValOperand_Shifted
; CHECK:  %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_or_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw or i16* %arg, i16 %val seq_cst
  ret i16 %ret
}

;; and uses a widened operand with the out-of-mask bits forced to 1 so the
;; surrounding bytes in the word are preserved.
; CHECK-LABEL: @test_and_i16(
; (I'm going to just assert on the bits that differ from add, above.)
; CHECK:  %AndOperand = or i32 %Inv_Mask, %ValOperand_Shifted
; CHECK:atomicrmw.start:
; CHECK:  %new = and i32 %loaded, %AndOperand
; CHECK:  %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_and_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw and i16* %arg, i16 %val seq_cst
  ret i16 %ret
}

;; min must compare at the original i16 width: extract, icmp/select, then
;; re-insert the chosen value into the word before the cmpxchg.
; CHECK-LABEL: @test_min_i16(
; CHECK:atomicrmw.start:
; CHECK:  %shifted = lshr i32 %loaded, %ShiftAmt
; CHECK:  %extracted = trunc i32 %shifted to i16
; CHECK:  %6 = icmp sle i16 %extracted, %val
; CHECK:  %new = select i1 %6, i16 %extracted, i16 %val
; CHECK:  %extended = zext i16 %new to i32
; CHECK:  %shifted1 = shl nuw i32 %extended, %ShiftAmt
; CHECK:  %unmasked = and i32 %loaded, %Inv_Mask
; CHECK:  %inserted = or i32 %unmasked, %shifted1
; CHECK:  %7 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %inserted monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_min_i16(i16* %arg, i16 %val) {
entry:
  %ret = atomicrmw min i16* %arg, i16 %val seq_cst
  ret i16 %ret
}
