/external/swiftshader/third_party/llvm-7.0/llvm/test/Bitcode/
cmpxchg-upgrade.ll
      4  ; cmpxchg-upgrade.ll.bc was produced by running a version of llvm-as from just
      8  cmpxchg i32* %addr, i32 42, i32 0 monotonic
      9  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 monotonic monotonic
     11  cmpxchg i32* %addr, i32 42, i32 0 acquire
     12  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acquire acquire
     14  cmpxchg i32* %addr, i32 42, i32 0 release
     15  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 release monotonic
     17  cmpxchg i32* %addr, i32 42, i32 0 acq_rel
     18  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
     20  cmpxchg i32* %addr, i32 42, i32 0 seq_cst
    [all …]

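Note: this test exercises the bitcode auto-upgrade that took cmpxchg from a
single memory ordering to a success/failure pair in LLVM 3.5; the failure
ordering is derived by dropping any release component from the success
ordering. A minimal sketch of the upgraded forms, in the same shape as the
test (hypothetical @upgrade_examples wrapper):

    define void @upgrade_examples(i32* %addr) {
      ; Each failure ordering below is the success ordering with its
      ; release component stripped, matching the CHECK lines above.
      %1 = cmpxchg i32* %addr, i32 42, i32 0 monotonic monotonic
      %2 = cmpxchg i32* %addr, i32 42, i32 0 acquire acquire
      %3 = cmpxchg i32* %addr, i32 42, i32 0 release monotonic
      %4 = cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
      %5 = cmpxchg i32* %addr, i32 42, i32 0 seq_cst seq_cst
      ret void
    }
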
atomic.ll
      5  cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
      6  ; CHECK: cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
      8  cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
      9  ; CHECK: cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
     11  cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
     12  ; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
     14  cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
     15  ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic

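Note: these lines only check round-trip syntax; the modern cmpxchg actually
yields a { value, success } pair rather than a bare value. A minimal sketch of
consuming that result (hypothetical @try_swap wrapper):

    define i32 @try_swap(i32* %addr, i32 %desired, i32 %new) {
      ; The first element is the value observed in memory, the second an
      ; i1 that is true iff the exchange happened.
      %pair = cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new acq_rel acquire
      %loaded = extractvalue { i32, i1 } %pair, 0
      %ok = extractvalue { i32, i1 } %pair, 1
      ; On success memory now holds %new; otherwise report what was seen.
      %result = select i1 %ok, i32 %new, i32 %loaded
      ret i32 %result
    }
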
memInstructions.3.2.ll
    223  define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
    225  ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
    227  ; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    229  %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    231  ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    233  %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    235  ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
    237  %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
    239  ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
    241  %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
    [all …]

/external/llvm/test/Bitcode/
cmpxchg-upgrade.ll
      4  ; cmpxchg-upgrade.ll.bc was produced by running a version of llvm-as from just
      8  cmpxchg i32* %addr, i32 42, i32 0 monotonic
      9  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 monotonic monotonic
     11  cmpxchg i32* %addr, i32 42, i32 0 acquire
     12  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acquire acquire
     14  cmpxchg i32* %addr, i32 42, i32 0 release
     15  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 release monotonic
     17  cmpxchg i32* %addr, i32 42, i32 0 acq_rel
     18  ; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
     20  cmpxchg i32* %addr, i32 42, i32 0 seq_cst
    [all …]

atomic.ll
      5  cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
      6  ; CHECK: cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
      8  cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
      9  ; CHECK: cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
     11  cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
     12  ; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
     14  cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic
     15  ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic

memInstructions.3.2.ll
    223  define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
    225  ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
    227  ; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    229  %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    231  ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    233  %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
    235  ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
    237  %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
    239  ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
    241  %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/AtomicExpand/SPARC/
partword.ll
      3  ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
      7  ;;; doesn't support cmpxchg of sub-word sizes would do.
     28  ; CHECK: br label %partword.cmpxchg.loop
     29  ; CHECK:partword.cmpxchg.loop:
     30  ; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
     33  ; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
     36  ; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
     37  ; CHECK:partword.cmpxchg.failure:
     40  ; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
     41  ; CHECK:partword.cmpxchg.end:
    [all …]

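Note: the expansion these CHECK lines trace rounds the address down to its
containing aligned 32-bit word, shifts the narrow operands into position, and
loops on a full-word cmpxchg, retrying only when bytes outside the target byte
changed underneath it. A simplified sketch of the pattern with hypothetical
names (shift computed little-endian for brevity; the SPARC target in the test
is big-endian and takes the shift from the other end of the word):

    define i8 @partword_cmpxchg_sketch(i8* %p, i8 %cmp, i8 %new) {
    entry:
      ; Round the address down to the containing aligned 32-bit word.
      %addrint = ptrtoint i8* %p to i64
      %alignedint = and i64 %addrint, -4
      %AlignedAddr = inttoptr i64 %alignedint to i32*
      ; Bit offset of the target byte within that word.
      %byteoff = and i64 %addrint, 3
      %byteoff32 = trunc i64 %byteoff to i32
      %ShiftAmt = shl i32 %byteoff32, 3
      ; Masks selecting the target byte and the surrounding bytes.
      %Mask = shl i32 255, %ShiftAmt
      %InvMask = xor i32 %Mask, -1
      ; Widen the i8 operands and move them into position.
      %cmp.w = zext i8 %cmp to i32
      %new.w = zext i8 %new to i32
      %cmp.sh = shl i32 %cmp.w, %ShiftAmt
      %new.sh = shl i32 %new.w, %ShiftAmt
      ; Seed the loop with the bytes that must be preserved.
      %init = load i32, i32* %AlignedAddr
      %init.rest = and i32 %init, %InvMask
      br label %partword.cmpxchg.loop

    partword.cmpxchg.loop:
      %rest = phi i32 [ %init.rest, %entry ], [ %fail.rest, %partword.cmpxchg.failure ]
      %full.cmp = or i32 %rest, %cmp.sh
      %full.new = or i32 %rest, %new.sh
      %pair = cmpxchg i32* %AlignedAddr, i32 %full.cmp, i32 %full.new monotonic monotonic
      %loaded = extractvalue { i32, i1 } %pair, 0
      %success = extractvalue { i32, i1 } %pair, 1
      br i1 %success, label %partword.cmpxchg.end, label %partword.cmpxchg.failure

    partword.cmpxchg.failure:
      ; Retry only if bytes outside the target byte changed; otherwise the
      ; compare genuinely failed and the loop is done.
      %fail.rest = and i32 %loaded, %InvMask
      %changed = icmp ne i32 %rest, %fail.rest
      br i1 %changed, label %partword.cmpxchg.loop, label %partword.cmpxchg.end

    partword.cmpxchg.end:
      %shifted = lshr i32 %loaded, %ShiftAmt
      %result = trunc i32 %shifted to i8
      ret i8 %result
    }
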
/external/llvm/test/Transforms/AtomicExpand/SPARC/
partword.ll
      3  ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
      7  ;;; doesn't support cmpxchg of sub-word sizes would do.
     28  ; CHECK: br label %partword.cmpxchg.loop
     29  ; CHECK:partword.cmpxchg.loop:
     30  ; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
     33  ; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
     36  ; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
     37  ; CHECK:partword.cmpxchg.failure:
     40  ; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
     41  ; CHECK:partword.cmpxchg.end:
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
memory-legalizer-atomic-cmpxchg.ll
     13  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in monotonic monotonic
     26  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in acquire monotonic
     39  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in release monotonic
     52  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in acq_rel monotonic
     65  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst monotonic
     78  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in acquire acquire
     91  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in release acquire
    104  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in acq_rel acquire
    117  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst acquire
    130  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst seq_cst
    [all …]

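Note: the ten pairs above are exactly the combinations the IR verifier accepts
in this era: a failure ordering may never be release or acq_rel, and at this
point in LLVM's history it also may not be stronger than the success ordering.
A small sketch (hypothetical wrapper) of the strongest legal failure ordering
for each success ordering:

    define void @legal_failure_orderings(i32* %gep, i32 %old, i32 %in) {
      ; monotonic success permits only a monotonic failure ordering ...
      %a = cmpxchg volatile i32* %gep, i32 %old, i32 %in monotonic monotonic
      ; ... acquire, release, and acq_rel permit at most acquire ...
      %b = cmpxchg volatile i32* %gep, i32 %old, i32 %in acq_rel acquire
      ; ... and only seq_cst success permits a seq_cst failure ordering.
      %c = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst seq_cst
      ret void
    }
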
/external/swiftshader/third_party/subzero/tests_lit/llvm2ice_tests/
nacl-atomic-cmpxchg-optimization.ll
      1  ; This tests the optimization of atomic cmpxchg w/ following cmp + branches.
      8  declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
     11  ; Test that a cmpxchg followed by icmp eq and branch can be optimized to
     12  ; reuse the flags set by the cmpxchg instruction itself.
     26  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected_loop,
     36  ; O2: lock cmpxchg DWORD PTR [e{{[^a].}}],e{{[^a]}}
     43  ; OM1: lock cmpxchg DWORD PTR [e{{[^a].}}],e{{[^a]}}
     57  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected_loop,
     66  ; O2: lock cmpxchg DWORD PTR [e{{[^a].}}],e{{[^a]}}
     79  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0,
    [all …]

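Note: Subzero's optimization keys on the compare of the returned old value
against the expected value. A sketch of the IR shape it matches (hypothetical
names; the trailing two i32s encode the success and failure memory orderings,
with 6 taken here to mean seq_cst — an assumption based on the abi-atomics.ll
hits below, where 6,6 is the accepted pair):

    declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)

    define i32 @flag_reuse(i32* %ptr, i32 %expected, i32 %desired) {
    entry:
      %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
                                                    i32 %desired, i32 6, i32 6)
      ; When this compare feeds the branch directly, the lowering can branch
      ; on the ZF flag left by lock cmpxchg instead of re-comparing.
      %ok = icmp eq i32 %old, %expected
      br i1 %ok, label %succeeded, label %failed

    succeeded:
      ret i32 1

    failed:
      ret i32 0
    }
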
abi-atomics.ll
     21  declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32)
     22  declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32)
     23  declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
     24  declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
    207  ; cmpxchg
    211  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 7, i32 7)
    218  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 0, i32 0)
    227  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 6)
    234  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 5)
    241  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 4)
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Assembler/
atomic.ll
     18  ; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
     19  cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
     20  ; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("workitem") monotonic monotonic
     21  cmpxchg i32* %x, i32 1, i32 0 syncscope("workitem") monotonic monotonic
     22  ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
     23  cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
     24  ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
     25  cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
     26  ; CHECK: cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
     27  cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic

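Note: the assembler accepts arbitrary syncscope strings here ("workitem" is
just a target- or user-defined scope name). On the weak form: a weak cmpxchg
may fail spuriously even when the loaded value matches, so it is normally
wrapped in a retry loop, in exchange for cheaper lowering on LL/SC targets.
A minimal sketch of the idiom (hypothetical @atomic_inc wrapper):

    define i32 @atomic_inc(i32* %x) {
    entry:
      %init = load i32, i32* %x
      br label %retry

    retry:
      ; Recompute the increment from the most recently observed value.
      %old = phi i32 [ %init, %entry ], [ %loaded, %retry ]
      %inc = add i32 %old, 1
      %pair = cmpxchg weak i32* %x, i32 %old, i32 %inc acq_rel monotonic
      %loaded = extractvalue { i32, i1 } %pair, 0
      %ok = extractvalue { i32, i1 } %pair, 1
      br i1 %ok, label %done, label %retry

    done:
      ret i32 %old
    }
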
/external/llvm/test/Assembler/
atomic.ll
     14  ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
     15  cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
     16  ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
     17  cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
     18  ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
     19  cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
     20  ; CHECK: cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
     21  cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/
cmpxchg-03.ll
     10  %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
     21  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     32  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     43  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     56  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     67  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     78  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     91  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    104  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    118  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    [all …]

cmpxchg-04.ll
     10  %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
     21  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     34  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     45  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     56  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     69  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     82  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     93  %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
    104  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
    117  %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
    [all …]

cmpxchg-06.ll
     16  %pairval = cmpxchg i128 *%src, i128 %cmp, i128 %swap seq_cst seq_cst
     27  %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
     40  %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
     51  %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
     62  %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
     75  %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
     88  %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
     99  %pairval = cmpxchg i128 *%ptr, i128 1001, i128 %swap seq_cst seq_cst
    110  %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 1002 seq_cst seq_cst
    127  %pairval = cmpxchg i128 *%src, i128 %cmp, i128 %swap seq_cst seq_cst
    [all …]

cmpxchg-05.ll
      1  ; Test proper extension of 8-bit/16-bit cmpxchg.
     13  %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
     26  %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst
     39  %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
     52  %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst

/external/llvm/test/CodeGen/SystemZ/
cmpxchg-03.ll
     10  %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
     21  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     32  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     43  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     56  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     67  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     78  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
     91  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    104  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    118  %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    [all …]

cmpxchg-05.ll
      1  ; Test proper extension of 8-bit/16-bit cmpxchg.
     13  %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
     26  %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst
     39  %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
     52  %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst
     62  %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
     74  %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst

cmpxchg-04.ll
     10  %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
     21  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     34  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     45  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     56  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     69  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     82  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
     93  %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
    104  %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
select-cmpxchg.ll
      5  ; CHECK-NEXT: %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
      9  %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
     18  ; CHECK-NEXT: %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
     21  %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
     30  ; CHECK-NEXT: %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value acq_rel monotonic
     33  %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value acq_rel monotonic

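Note: the fold being checked: when the success bit selects between the compare
operand and the loaded value, the two arms are equal whenever the bit is true,
so InstCombine replaces the select with the loaded value. A sketch of the
pattern (hypothetical wrapper around the test's %tmp names):

    define i64 @fold_sketch(i64* %ptr, i64 %compare, i64 %new_value) {
      %tmp0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
      %tmp1 = extractvalue { i64, i1 } %tmp0, 0
      %tmp2 = extractvalue { i64, i1 } %tmp0, 1
      ; If %tmp2 is true then %tmp1 == %compare, so this select always
      ; produces %tmp1 and can be replaced by it outright.
      %tmp3 = select i1 %tmp2, i64 %compare, i64 %tmp1
      ret i64 %tmp3
    }
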
/external/llvm/test/CodeGen/X86/
peephole-na-phys-copy-folding.ll
    120  ; CHECK: cmpxchg
    127  ; cmpxchg sets EFLAGS, call clobbers it, then br uses EFLAGS.
    128  %cx = cmpxchg i64* %foo, i64 %bar, i64 %baz seq_cst seq_cst
    142  ; CHECK: cmpxchg
    145  ; Save result of the first cmpxchg into a temporary.
    154  ; CHECK: cmpxchg
    156  ; Save result of the second cmpxchg onto the stack.
    158  ; Restore result of the first cmpxchg from D, put it back in EFLAGS.
    162  ; Restore result of the second cmpxchg from the stack.
    164  ; Test from EFLAGS restored from first cmpxchg, jump if that fails.
    [all …]

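Note: what the comments describe, roughly: each cmpxchg leaves its success bit
in EFLAGS, and a later call or a second cmpxchg clobbers it, so the backend
must materialize the flag into a register or spill slot and re-test it later.
A sketch of IR that creates the problem (hypothetical names, not the test's
exact functions):

    define i1 @two_live_flags(i64* %foo, i64 %bar, i64 %baz) {
    entry:
      %cx1 = cmpxchg i64* %foo, i64 %bar, i64 %baz seq_cst seq_cst
      %ok1 = extractvalue { i64, i1 } %cx1, 1
      %cx2 = cmpxchg i64* %foo, i64 %baz, i64 %bar seq_cst seq_cst
      %ok2 = extractvalue { i64, i1 } %cx2, 1
      ; Both success bits are live here, but the second cmpxchg already
      ; clobbered the EFLAGS produced by the first one.
      %both = and i1 %ok1, %ok2
      br i1 %both, label %bothset, label %notboth

    bothset:
      ret i1 true

    notboth:
      ret i1 false
    }
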
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/AtomicExpand/X86/
expand-atomic-non-integer.ll
     87  ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
     93  cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
    101  ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 release monotonic
    107  cmpxchg i8** %ptr, i8* null, i8* %v release monotonic
    115  ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
    121  cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    129  ; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
    135  cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    143  ; CHECK: %3 = cmpxchg volatile i64* %1, i64 0, i64 %2 seq_cst seq_cst
    149  cmpxchg volatile i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    [all …]

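Note: backends generally only handle integer cmpxchg, so AtomicExpand rewrites
a pointer-typed cmpxchg through an integer of pointer width, exactly as the
%1/%2/%3 CHECK lines show. A sketch of the rewritten form (hypothetical
wrapper, 64-bit target assumed):

    define i8* @expand_sketch(i8** %ptr, i8* %v) {
      ; Original: cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
      %1 = bitcast i8** %ptr to i64*
      %2 = ptrtoint i8* %v to i64
      %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
      %4 = extractvalue { i64, i1 } %3, 0
      %5 = inttoptr i64 %4 to i8*
      ret i8* %5
    }
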
/external/llvm/test/Transforms/AtomicExpand/X86/
expand-atomic-non-integer.ll
     87  ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
     93  cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
    101  ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 release monotonic
    107  cmpxchg i8** %ptr, i8* null, i8* %v release monotonic
    115  ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
    121  cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    129  ; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
    135  cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    143  ; CHECK: %3 = cmpxchg volatile i64* %1, i64 0, i64 %2 seq_cst seq_cst
    149  cmpxchg volatile i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/RISCV/
atomic-cmpxchg.ll
     18  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
     35  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
     52  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
     69  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
     86  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
    103  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
    120  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
    137  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
    154  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
    171  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
    [all …]
