; RUN: llc < %s -mtriple=powerpc-apple-darwin -march=ppc32 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32
; FIXME: -verify-machineinstrs currently fail on ppc64 (mismatched register/instruction).
; This is already checked for in Atomics-64.ll
; RUN: llc < %s -mtriple=powerpc-apple-darwin -march=ppc64 | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64

; FIXME: we don't currently check for the operations themselves with CHECK-NEXT,
; because they are implemented in a very messy way with lwarx/stwcx.
; It should be fixed soon in another patch.

; We first check loads, for all sizes from i8 to i64.
; We also vary orderings to check for barriers.
define i8 @load_i8_unordered(i8* %mem) {
; CHECK-LABEL: load_i8_unordered
; CHECK: lbz
; CHECK-NOT: sync
  %val = load atomic i8, i8* %mem unordered, align 1
  ret i8 %val
}
define i16 @load_i16_monotonic(i16* %mem) {
; CHECK-LABEL: load_i16_monotonic
; CHECK: lhz
; CHECK-NOT: sync
  %val = load atomic i16, i16* %mem monotonic, align 2
  ret i16 %val
}
define i32 @load_i32_acquire(i32* %mem) {
; CHECK-LABEL: load_i32_acquire
; CHECK: lwz
  %val = load atomic i32, i32* %mem acquire, align 4
; CHECK: lwsync
  ret i32 %val
}
define i64 @load_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: load_i64_seq_cst
; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: ld
  %val = load atomic i64, i64* %mem seq_cst, align 8
; CHECK: lwsync
  ret i64 %val
}

; Stores
define void @store_i8_unordered(i8* %mem) {
; CHECK-LABEL: store_i8_unordered
; CHECK-NOT: sync
; CHECK: stb
  store atomic i8 42, i8* %mem unordered, align 1
  ret void
}
define void @store_i16_monotonic(i16* %mem) {
; CHECK-LABEL: store_i16_monotonic
; CHECK-NOT: sync
; CHECK: sth
  store atomic i16 42, i16* %mem monotonic, align 2
  ret void
}
define void @store_i32_release(i32* %mem) {
; CHECK-LABEL: store_i32_release
; CHECK: lwsync
; CHECK: stw
  store atomic i32 42, i32* %mem release, align 4
  ret void
}
define void @store_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: store_i64_seq_cst
; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: std
  store atomic i64 42, i64* %mem seq_cst, align 8
  ret void
}

; Atomic CmpXchg
define i8 @cas_strong_i8_sc_sc(i8* %mem) {
; CHECK-LABEL: cas_strong_i8_sc_sc
; CHECK: sync
  %val = cmpxchg i8* %mem, i8 0, i8 1 seq_cst seq_cst
; CHECK: lwsync
  %loaded = extractvalue { i8, i1} %val, 0
  ret i8 %loaded
}
define i16 @cas_weak_i16_acquire_acquire(i16* %mem) {
; CHECK-LABEL: cas_weak_i16_acquire_acquire
; CHECK-NOT: sync
  %val = cmpxchg weak i16* %mem, i16 0, i16 1 acquire acquire
; CHECK: lwsync
  %loaded = extractvalue { i16, i1} %val, 0
  ret i16 %loaded
}
define i32 @cas_strong_i32_acqrel_acquire(i32* %mem) {
; CHECK-LABEL: cas_strong_i32_acqrel_acquire
; CHECK: lwsync
  %val = cmpxchg i32* %mem, i32 0, i32 1 acq_rel acquire
; CHECK: lwsync
  %loaded = extractvalue { i32, i1} %val, 0
  ret i32 %loaded
}
define i64 @cas_weak_i64_release_monotonic(i64* %mem) {
; CHECK-LABEL: cas_weak_i64_release_monotonic
; CHECK: lwsync
  %val = cmpxchg weak i64* %mem, i64 0, i64 1 release monotonic
; CHECK-NOT: [sync ]
  %loaded = extractvalue { i64, i1} %val, 0
  ret i64 %loaded
}

; AtomicRMW
define i8 @add_i8_monotonic(i8* %mem, i8 %operand) {
; CHECK-LABEL: add_i8_monotonic
; CHECK-NOT: sync
  %val = atomicrmw add i8* %mem, i8 %operand monotonic
  ret i8 %val
}
define i16 @xor_i16_seq_cst(i16* %mem, i16 %operand) {
; CHECK-LABEL: xor_i16_seq_cst
; CHECK: sync
  %val = atomicrmw xor i16* %mem, i16 %operand seq_cst
; CHECK: lwsync
  ret i16 %val
}
define i32 @xchg_i32_acq_rel(i32* %mem, i32 %operand) {
; CHECK-LABEL: xchg_i32_acq_rel
; CHECK: lwsync
  %val = atomicrmw xchg i32* %mem, i32 %operand acq_rel
; CHECK: lwsync
  ret i32 %val
}
define i64 @and_i64_release(i64* %mem, i64 %operand) {
; CHECK-LABEL: and_i64_release
; CHECK: lwsync
  %val = atomicrmw and i64* %mem, i64 %operand release
; CHECK-NOT: [sync ]
  ret i64 %val
}