/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
  consecutive-fences.ll
    6: ; CHECK-NEXT: fence seq_cst
    12: fence seq_cst
    13: fence seq_cst
    14: fence seq_cst
    22: ; CHECK-NEXT: fence seq_cst
    23: ; CHECK-NEXT: fence syncscope("singlethread") seq_cst
    28: fence seq_cst
    29: fence syncscope("singlethread") seq_cst
    35: ; CHECK-NEXT: fence seq_cst
    37: ; CHECK-NEXT: fence seq_cst
    [all …]
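The CHECK-NEXT lines above suggest this InstCombine test verifies that consecutive identical fences collapse into a single fence. A minimal sketch of that input shape, with a hypothetical function name (not taken from the test itself):

    define void @redundant_fences_sketch() {
      fence seq_cst
      fence seq_cst                             ; identical back-to-back fence; expected to be merged away
      fence syncscope("singlethread") seq_cst   ; single-thread scoped fence, as on lines 23 and 29
      ret void
    }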

/external/clang/test/OpenMP/
  atomic_ast_print.cpp
    28: #pragma omp atomic seq_cst          (in foo())
    30: #pragma omp atomic read seq_cst     (in foo())
    32: #pragma omp atomic seq_cst write    (in foo())
    34: #pragma omp atomic update seq_cst   (in foo())
    36: #pragma omp atomic seq_cst capture  (in foo())
    38: #pragma omp atomic capture seq_cst  (in foo())
    128: #pragma omp atomic seq_cst         (in main())
    130: #pragma omp atomic read seq_cst    (in main())
    132: #pragma omp atomic seq_cst write   (in main())
    134: #pragma omp atomic update seq_cst  (in main())
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/
  cmpxchg-03.ll
    10: %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
    21: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    32: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    43: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    56: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    67: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    78: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    91: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    104: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    118: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    [all …]
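For reference, a self-contained sketch of the strong compare-and-swap pattern these SystemZ tests repeat; the function name is hypothetical, and the two trailing seq_cst keywords are the cmpxchg success and failure orderings:

    define i32 @cas_sketch(i32 *%ptr, i32 %cmp, i32 %swap) {
      %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
      %old = extractvalue { i32, i1 } %pair, 0   ; value that was in memory before the exchange attempt
      ret i32 %old
    }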
  cmpxchg-04.ll
    10: %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
    21: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    34: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    45: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    56: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    69: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    82: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    93: %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
    104: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
    117: %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
    [all …]
  cmpxchg-06.ll
    16: %pairval = cmpxchg i128 *%src, i128 %cmp, i128 %swap seq_cst seq_cst
    27: %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
    40: %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
    51: %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
    62: %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
    75: %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
    88: %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
    99: %pairval = cmpxchg i128 *%ptr, i128 1001, i128 %swap seq_cst seq_cst
    110: %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 1002 seq_cst seq_cst
    127: %pairval = cmpxchg i128 *%src, i128 %cmp, i128 %swap seq_cst seq_cst
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
  flat_atomics_i64.ll
    9: %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
    19: %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
    30: %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
    41: %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst
    50: %tmp0 = atomicrmw volatile add i64* %out, i64 %in seq_cst
    59: %tmp0 = atomicrmw volatile add i64* %out, i64 %in seq_cst
    69: %tmp0 = atomicrmw volatile add i64* %ptr, i64 %in seq_cst
    79: %tmp0 = atomicrmw volatile add i64* %ptr, i64 %in seq_cst
    89: %tmp0 = atomicrmw volatile and i64* %gep, i64 %in seq_cst
    99: %tmp0 = atomicrmw volatile and i64* %gep, i64 %in seq_cst
    [all …]
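A standalone version of the atomicrmw pattern these flat-atomics tests repeat; the function name, the extra %out parameter, and the constant offset are hypothetical, chosen only to make the sketch self-contained:

    define void @rmw_add_sketch(i64* %ptr, i64* %out, i64 %in) {
      %gep = getelementptr i64, i64* %ptr, i64 4                   ; hypothetical constant offset
      %tmp0 = atomicrmw volatile add i64* %gep, i64 %in seq_cst    ; returns the previous memory value
      store i64 %tmp0, i64* %out
      ret void
    }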
  flat_atomics.ll
    11: %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
    21: %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
    30: %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
    41: %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
    53: %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
    65: %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
    74: %val = atomicrmw volatile add i32* %out, i32 %in seq_cst
    83: %val = atomicrmw volatile add i32* %out, i32 %in seq_cst
    93: %val = atomicrmw volatile add i32* %ptr, i32 %in seq_cst
    103: %val = atomicrmw volatile add i32* %ptr, i32 %in seq_cst
    [all …]
  r600.global_atomics.ll
    12: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    22: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    34: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    45: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    54: %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
    64: %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
    74: %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
    85: %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
    94: %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
    104: %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
    [all …]
  local-atomics64.ll
    12: %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
    25: %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
    37: %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
    56: %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
    71: %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
    84: %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
    96: %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
    109: %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
    124: %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
    137: %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
    [all …]
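The local-atomics tests apply the same instruction to addrspace(3) (local/LDS) pointers with constant operands; a minimal sketch under those assumptions, with a hypothetical function name:

    define void @lds_xchg_sketch(i64 addrspace(3)* %ptr, i64* %out) {
      %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst   ; swap in the constant 4, get the old value
      store i64 %result, i64* %out
      ret void
    }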

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/NVPTX/
  atomics.ll
    7: %ret = atomicrmw add i32* %addr, i32 %val seq_cst
    14: %ret = atomicrmw add i64* %addr, i64 %val seq_cst
    22: %ret = atomicrmw sub i32* %subr, i32 %val seq_cst
    30: %ret = atomicrmw sub i64* %subr, i64 %val seq_cst
    37: %ret = atomicrmw and i32* %subr, i32 %val seq_cst
    44: %ret = atomicrmw and i64* %subr, i64 %val seq_cst
    50: ; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
    55: ; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
    62: %ret = atomicrmw or i32* %subr, i32 %val seq_cst
    69: %ret = atomicrmw or i64* %subr, i64 %val seq_cst
    [all …]

/external/llvm/test/CodeGen/SystemZ/
  cmpxchg-03.ll
    10: %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
    21: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    32: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    43: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    56: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    67: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    78: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    91: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    104: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    118: %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
    [all …]
  cmpxchg-04.ll
    10: %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
    21: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    34: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    45: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    56: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    69: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    82: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
    93: %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
    104: %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
  cmpxchg-05.ll
    13: %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
    26: %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst
    39: %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
    52: %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst
    62: %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
    74: %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst

/external/llvm/test/CodeGen/NVPTX/
  atomics.ll
    7: %ret = atomicrmw add i32* %addr, i32 %val seq_cst
    14: %ret = atomicrmw add i64* %addr, i64 %val seq_cst
    22: %ret = atomicrmw sub i32* %subr, i32 %val seq_cst
    30: %ret = atomicrmw sub i64* %subr, i64 %val seq_cst
    37: %ret = atomicrmw and i32* %subr, i32 %val seq_cst
    44: %ret = atomicrmw and i64* %subr, i64 %val seq_cst
    50: ; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
    55: ; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
    62: %ret = atomicrmw or i32* %subr, i32 %val seq_cst
    69: %ret = atomicrmw or i64* %subr, i64 %val seq_cst
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
  nocx16.ll
    5: %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
    7: %1 = atomicrmw xchg i128* %a, i128 1 seq_cst
    9: %2 = atomicrmw add i128* %a, i128 1 seq_cst
    11: %3 = atomicrmw sub i128* %a, i128 1 seq_cst
    13: %4 = atomicrmw and i128* %a, i128 1 seq_cst
    15: %5 = atomicrmw nand i128* %a, i128 1 seq_cst
    17: %6 = atomicrmw or i128* %a, i128 1 seq_cst
    19: %7 = atomicrmw xor i128* %a, i128 1 seq_cst
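Judging by the file name, this test targets x86 without the cmpxchg16b feature, so these i128 atomics presumably cannot stay inline and must be expanded by the backend (for example into library calls). A minimal reproduction of the IR shape, with hypothetical function and value names in place of the test's %0..%7:

    define void @i128_no_cx16_sketch(i128* %a) {
    entry:
      %pair = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
      %old = atomicrmw xchg i128* %a, i128 1 seq_cst
      ret void
    }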

/external/llvm/test/CodeGen/X86/
  nocx16.ll
    5: %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
    7: %1 = atomicrmw xchg i128* %a, i128 1 seq_cst
    9: %2 = atomicrmw add i128* %a, i128 1 seq_cst
    11: %3 = atomicrmw sub i128* %a, i128 1 seq_cst
    13: %4 = atomicrmw and i128* %a, i128 1 seq_cst
    15: %5 = atomicrmw nand i128* %a, i128 1 seq_cst
    17: %6 = atomicrmw or i128* %a, i128 1 seq_cst
    19: %7 = atomicrmw xor i128* %a, i128 1 seq_cst

/external/llvm/test/CodeGen/AMDGPU/
  flat_atomics_i64.ll
    9: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
    19: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
    30: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
    41: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
    50: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
    59: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
    69: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
    79: %tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
    89: %tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
    99: %tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
    [all …]
  local-atomics64.ll
    8: %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
    18: %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
    27: %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
    43: %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
    55: %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
    65: %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
    74: %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
    84: %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
    96: %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
    106: %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
    [all …]
  flat_atomics.ll
    9: %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
    19: %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
    30: %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
    41: %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
    50: %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
    59: %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
    69: %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
    79: %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
    89: %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
    99: %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
    [all …]
  local-atomics.ll
    15: %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
    26: %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
    41: %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
    52: %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
    66: %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
    77: %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
    89: %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
    103: %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
    113: %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
    124: %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
    [all …]
  global_atomics.ll
    9: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    19: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    32: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    42: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    54: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    66: %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
    75: %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
    84: %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
    95: %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
    106: %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
    [all …]
  global_atomics_i64.ll
    9: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
    19: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
    31: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
    43: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
    52: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
    61: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
    72: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
    83: %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
    93: %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
    103: %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/AtomicExpand/X86/
  expand-atomic-non-integer.ll
    20: ; CHECK: %2 = load atomic i32, i32* %1 seq_cst, align 4
    23: %res = load atomic float, float* %ptr seq_cst, align 4
    60: ; CHECK: store atomic i32 %1, i32* %2 seq_cst, align 4
    61: store atomic float %v, float* %ptr seq_cst, align 4
    87: ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
    93: cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
    115: ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
    121: cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    129: ; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
    135: cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    [all …]
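The CHECK lines above show the AtomicExpand pass rewriting atomics on non-integer types (float, i8*) into same-width integer atomics. A minimal sketch of one such input, with a hypothetical function name:

    define float @float_atomic_load_sketch(float* %ptr) {
      ; per the CHECK on line 20, this becomes an atomic i32 load through a casted pointer
      %res = load atomic float, float* %ptr seq_cst, align 4
      ret float %res
    }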

/external/llvm/test/Transforms/AtomicExpand/X86/
  expand-atomic-non-integer.ll
    20: ; CHECK: %2 = load atomic i32, i32* %1 seq_cst, align 4
    23: %res = load atomic float, float* %ptr seq_cst, align 4
    60: ; CHECK: store atomic i32 %1, i32* %2 seq_cst, align 4
    61: store atomic float %v, float* %ptr seq_cst, align 4
    87: ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
    93: cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
    115: ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
    121: cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    129: ; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
    135: cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/WebAssembly/
  offset-atomics.ll
    19: %v = load atomic i32, i32* %p seq_cst, align 4
    31: %t = load atomic i32, i32* %s seq_cst, align 4
    41: %t = load atomic i32, i32* %s seq_cst, align 4
    53: %t = load atomic i32, i32* %s seq_cst, align 4
    67: %t = load atomic i32, i32* %s seq_cst, align 4
    79: %t = load atomic i32, i32* %s seq_cst, align 4
    90: %t = load atomic i32, i32* %s seq_cst, align 4
    99: %t = load atomic i32, i32* @gv seq_cst, align 4
    113: %v = load atomic i64, i64* %p seq_cst, align 8
    125: %t = load atomic i64, i64* %s seq_cst, align 8
    [all …]
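These WebAssembly tests load atomically through pointers built with constant getelementptr offsets, presumably to check that the constant folds into the load's offset field. A standalone sketch of that shape; the function name and offset are hypothetical:

    define i32 @atomic_load_offset_sketch(i32* %p) {
      %s = getelementptr inbounds i32, i32* %p, i32 6    ; hypothetical offset: 6 x 4 bytes = 24
      %t = load atomic i32, i32* %s seq_cst, align 4
      ret i32 %t
    }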