; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=aarch64-- -mcpu=falkor -mattr=+lse -O0 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - | FileCheck %s

; Verify the IRTranslator propagates the IR `volatile` qualifier into the
; machine memory operand of G_ATOMICRMW_ADD.
define i32 @atomicrmw_volatile(i32* %ptr) {
  ; CHECK-LABEL: name: atomicrmw_volatile
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile load store monotonic 4 on %ir.ptr)
  ; CHECK:   $w0 = COPY [[ATOMICRMW_ADD]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  %oldval = atomicrmw volatile add i32* %ptr, i32 1 monotonic
  ret i32 %oldval
}
16
; Verify the IRTranslator turns the !falkor.strided.access metadata into the
; target-specific "aarch64-strided-access" flag on the G_ATOMICRMW_ADD MMO.
define i32 @atomicrmw_falkor(i32* %ptr) {
  ; CHECK-LABEL: name: atomicrmw_falkor
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: ("aarch64-strided-access" load store monotonic 4 on %ir.ptr)
  ; CHECK:   $w0 = COPY [[ATOMICRMW_ADD]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  %oldval = atomicrmw add i32* %ptr, i32 1 monotonic, !falkor.strided.access !0
  ret i32 %oldval
}
29
; Verify `volatile` and the "aarch64-strided-access" flag can coexist on the
; same G_ATOMICRMW_ADD machine memory operand.
define i32 @atomicrmw_volatile_falkor(i32* %ptr) {
  ; CHECK-LABEL: name: atomicrmw_volatile_falkor
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile "aarch64-strided-access" load store monotonic 4 on %ir.ptr)
  ; CHECK:   $w0 = COPY [[ATOMICRMW_ADD]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  %oldval = atomicrmw volatile add i32* %ptr, i32 1 monotonic, !falkor.strided.access !0
  ret i32 %oldval
}
42
; Verify the IRTranslator propagates `volatile` into the machine memory
; operand of G_ATOMIC_CMPXCHG_WITH_SUCCESS.
define i32 @cmpxchg_volatile(i32* %addr) {
  ; CHECK-LABEL: name: cmpxchg_volatile
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile load store monotonic monotonic 4 on %ir.addr)
  ; CHECK:   $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  %val_success = cmpxchg volatile i32* %addr, i32 0, i32 1 monotonic monotonic
  %value_loaded = extractvalue { i32, i1 } %val_success, 0
  ret i32 %value_loaded
}
57
; Verify !falkor.strided.access becomes the "aarch64-strided-access" flag on
; the G_ATOMIC_CMPXCHG_WITH_SUCCESS machine memory operand.
define i32 @cmpxchg_falkor(i32* %addr) {
  ; CHECK-LABEL: name: cmpxchg_falkor
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: ("aarch64-strided-access" load store monotonic monotonic 4 on %ir.addr)
  ; CHECK:   $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  %val_success = cmpxchg i32* %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0
  %value_loaded = extractvalue { i32, i1 } %val_success, 0
  ret i32 %value_loaded
}
72
; Verify `volatile` and "aarch64-strided-access" can coexist on the
; G_ATOMIC_CMPXCHG_WITH_SUCCESS machine memory operand.
define i32 @cmpxchg_volatile_falkor(i32* %addr) {
  ; CHECK-LABEL: name: cmpxchg_volatile_falkor
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile "aarch64-strided-access" load store monotonic monotonic 4 on %ir.addr)
  ; CHECK:   $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  %val_success = cmpxchg volatile i32* %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0
  %value_loaded = extractvalue { i32, i1 } %val_success, 0
  ret i32 %value_loaded
}

; Empty metadata node referenced by the !falkor.strided.access attachments above.
!0 = !{}