
Searched refs:seq_cst (Results 1 – 25 of 226) sorted by relevance


/external/clang/test/OpenMP/
atomic_ast_print.cpp
28 #pragma omp atomic seq_cst in foo()
30 #pragma omp atomic read seq_cst in foo()
32 #pragma omp atomic seq_cst write in foo()
34 #pragma omp atomic update seq_cst in foo()
36 #pragma omp atomic seq_cst capture in foo()
38 #pragma omp atomic capture seq_cst in foo()
128 #pragma omp atomic seq_cst in main()
130 #pragma omp atomic read seq_cst in main()
132 #pragma omp atomic seq_cst write in main()
134 #pragma omp atomic update seq_cst in main()
[all …]
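
The clause combinations above all express the same idea: with the seq_cst clause, the atomic construct uses sequentially consistent ordering. As a rough hand-written sketch (not taken from atomic_ast_print.cpp; the function and value names below are illustrative), the read/write/update forms for a plain int lower to IR along these lines:

; illustrative sketch only; names are not from the test file
define void @omp_atomic_seq_cst_sketch(i32* %x, i32* %v) {
entry:
  %r = load atomic i32, i32* %x seq_cst, align 4   ; #pragma omp atomic read seq_cst
  store atomic i32 7, i32* %x seq_cst, align 4     ; #pragma omp atomic write seq_cst
  %old = atomicrmw add i32* %x, i32 1 seq_cst      ; #pragma omp atomic update seq_cst (x += 1)
  store i32 %r, i32* %v                            ; hand the read value back out
  ret void
}
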
/external/llvm/test/CodeGen/X86/
nocx16.ll
5 %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
7 %1 = atomicrmw xchg i128* %a, i128 1 seq_cst
9 %2 = atomicrmw add i128* %a, i128 1 seq_cst
11 %3 = atomicrmw sub i128* %a, i128 1 seq_cst
13 %4 = atomicrmw and i128* %a, i128 1 seq_cst
15 %5 = atomicrmw nand i128* %a, i128 1 seq_cst
17 %6 = atomicrmw or i128* %a, i128 1 seq_cst
19 %7 = atomicrmw xor i128* %a, i128 1 seq_cst
cmpxchg-i1.ll
9 %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
20 %pair = cmpxchg i64* %addr, i64 %desired, i64 %new seq_cst seq_cst
39 %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
51 %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
71 %init = load atomic i32, i32* %addr seq_cst, align 4
77 %pair = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst seq_cst
cmpxchg-i128-i1.ll
9 %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
20 %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
39 %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
51 %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
65 %init = load atomic i128, i128* %addr seq_cst, align 16
72 %pair = cmpxchg i128* %addr, i128 %old, i128 %new seq_cst seq_cst
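
The X86 cmpxchg-i1 tests revolve around the pair that cmpxchg returns: the first trailing ordering is for success, the second for failure, and the result is a {value, i1} struct whose i1 member is the success flag the "-i1" tests branch on. A minimal sketch of how the pair is unpacked (function and value names are mine, not from the tests):

; illustrative sketch only
define i1 @cas_sketch(i32* %addr, i32 %desired, i32 %new) {
entry:
  %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
  %old  = extractvalue { i32, i1 } %pair, 0   ; value found in memory
  %ok   = extractvalue { i32, i1 } %pair, 1   ; true iff the exchange happened
  ret i1 %ok
}
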
/external/llvm/test/CodeGen/SystemZ/
cmpxchg-03.ll
10 %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
21 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
32 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
43 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
56 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
67 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
78 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
91 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
104 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
118 %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
[all …]
cmpxchg-04.ll
10 %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
21 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
34 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
45 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
56 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
69 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
82 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
93 %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
104 %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
cmpxchg-05.ll
13 %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
26 %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst
39 %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
52 %cx = cmpxchg i16* %0, i16 %1, i16 %2 seq_cst seq_cst
62 %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
74 %cx = cmpxchg i8* %0, i8 %1, i8 %2 seq_cst seq_cst
atomicrmw-or-04.ll
15 %res = atomicrmw or i64 *%src, i64 %b seq_cst
29 %res = atomicrmw or i64 *%src, i64 1 seq_cst
38 %res = atomicrmw or i64 *%src, i64 65535 seq_cst
47 %res = atomicrmw or i64 *%src, i64 65536 seq_cst
56 %res = atomicrmw or i64 *%src, i64 65537 seq_cst
65 %res = atomicrmw or i64 *%src, i64 4294901760 seq_cst
74 %res = atomicrmw or i64 *%src, i64 4294901761 seq_cst
83 %res = atomicrmw or i64 *%src, i64 4294967295 seq_cst
92 %res = atomicrmw or i64 *%src, i64 4294967296 seq_cst
102 %res = atomicrmw or i64 *%src, i64 4294967297 seq_cst
[all …]
atomicrmw-and-04.ll
15 %res = atomicrmw and i64 *%src, i64 %b seq_cst
25 %res = atomicrmw and i64 *%src, i64 1 seq_cst
38 %res = atomicrmw and i64 *%src, i64 8589934591 seq_cst
52 %res = atomicrmw and i64 *%src, i64 12884901887 seq_cst
61 %res = atomicrmw and i64 *%src, i64 12884901888 seq_cst
70 %res = atomicrmw and i64 *%src, i64 844424930131967 seq_cst
79 %res = atomicrmw and i64 *%src, i64 281474976710656 seq_cst
88 %res = atomicrmw and i64 *%src, i64 -6 seq_cst
97 %res = atomicrmw and i64 *%src, i64 -65534 seq_cst
106 %res = atomicrmw and i64 *%src, i64 -65538 seq_cst
[all …]
atomicrmw-nand-04.ll
17 %res = atomicrmw nand i64 *%src, i64 %b seq_cst
27 %res = atomicrmw nand i64 *%src, i64 1 seq_cst
42 %res = atomicrmw nand i64 *%src, i64 8589934591 seq_cst
58 %res = atomicrmw nand i64 *%src, i64 12884901887 seq_cst
67 %res = atomicrmw nand i64 *%src, i64 12884901888 seq_cst
76 %res = atomicrmw nand i64 *%src, i64 844424930131967 seq_cst
85 %res = atomicrmw nand i64 *%src, i64 281474976710656 seq_cst
94 %res = atomicrmw nand i64 *%src, i64 -6 seq_cst
103 %res = atomicrmw nand i64 *%src, i64 -65534 seq_cst
112 %res = atomicrmw nand i64 *%src, i64 -65538 seq_cst
[all …]
atomicrmw-sub-04.ll
15 %res = atomicrmw sub i64 *%src, i64 %b seq_cst
29 %res = atomicrmw sub i64 *%src, i64 1 seq_cst
38 %res = atomicrmw sub i64 *%src, i64 32768 seq_cst
47 %res = atomicrmw sub i64 *%src, i64 32769 seq_cst
56 %res = atomicrmw sub i64 *%src, i64 2147483648 seq_cst
65 %res = atomicrmw sub i64 *%src, i64 2147483649 seq_cst
74 %res = atomicrmw sub i64 *%src, i64 -1 seq_cst
83 %res = atomicrmw sub i64 *%src, i64 -32767 seq_cst
92 %res = atomicrmw sub i64 *%src, i64 -32768 seq_cst
101 %res = atomicrmw sub i64 *%src, i64 -2147483647 seq_cst
[all …]
atomicrmw-add-04.ll
15 %res = atomicrmw add i64 *%src, i64 %b seq_cst
29 %res = atomicrmw add i64 *%src, i64 1 seq_cst
38 %res = atomicrmw add i64 *%src, i64 32767 seq_cst
47 %res = atomicrmw add i64 *%src, i64 32768 seq_cst
56 %res = atomicrmw add i64 *%src, i64 2147483647 seq_cst
65 %res = atomicrmw add i64 *%src, i64 2147483648 seq_cst
74 %res = atomicrmw add i64 *%src, i64 -1 seq_cst
83 %res = atomicrmw add i64 *%src, i64 -32768 seq_cst
92 %res = atomicrmw add i64 *%src, i64 -32769 seq_cst
101 %res = atomicrmw add i64 *%src, i64 -2147483648 seq_cst
[all …]
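
The SystemZ atomicrmw tests vary only the operation and the immediate operand; the shape of the instruction stays the same throughout. As a reminder of the semantics (a sketch with made-up names, not a line from the tests), each atomicrmw atomically replaces the memory value and yields the old one:

; illustrative sketch only
define i64 @rmw_sketch(i64* %src, i64 %b) {
entry:
  %o1 = atomicrmw and  i64* %src, i64 %b seq_cst   ; *src = old & %b, yields old
  %o2 = atomicrmw nand i64* %src, i64 %b seq_cst   ; *src = ~(old & %b)
  %o3 = atomicrmw sub  i64* %src, i64 %b seq_cst   ; *src = old - %b
  ret i64 %o3
}
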
/external/llvm/test/CodeGen/NVPTX/
atomics.ll
7 %ret = atomicrmw add i32* %addr, i32 %val seq_cst
14 %ret = atomicrmw add i64* %addr, i64 %val seq_cst
22 %ret = atomicrmw sub i32* %subr, i32 %val seq_cst
30 %ret = atomicrmw sub i64* %subr, i64 %val seq_cst
37 %ret = atomicrmw and i32* %subr, i32 %val seq_cst
44 %ret = atomicrmw and i64* %subr, i64 %val seq_cst
50 ; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
55 ; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
62 %ret = atomicrmw or i32* %subr, i32 %val seq_cst
69 %ret = atomicrmw or i64* %subr, i64 %val seq_cst
[all …]
/external/llvm/test/CodeGen/AMDGPU/
local-atomics64.ll
8 %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
18 %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
27 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
43 %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
55 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
65 %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
74 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
84 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
96 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
106 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
[all …]
flat_atomics_i64.ll
9 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
19 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
30 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
41 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
50 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
59 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
69 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
79 %tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
89 %tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
99 %tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
[all …]
flat_atomics.ll
9 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
19 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
30 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
41 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
50 %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
59 %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
69 %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
79 %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
89 %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
99 %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
[all …]
local-atomics.ll
15 %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
26 %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
41 %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
52 %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
66 %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
77 %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
89 %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
103 %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
113 %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
124 %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
[all …]
global_atomics_i64.ll
9 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
19 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
31 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
43 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
52 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
61 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
72 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
83 %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
93 %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
103 %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
[all …]
global_atomics.ll
9 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
19 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
32 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
42 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
54 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
66 %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
75 %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
84 %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
95 %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
106 %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
[all …]
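
Across the AMDGPU tests the interesting variation is the pointer's address space and the volatile marker, not the ordering: local-atomics*.ll use addrspace(3) (LDS), global_atomics*.ll use addrspace(1), and flat_atomics*.ll use addrspace(4) for the flat address space in this LLVM snapshot. A minimal sketch combining two of these flavours (names are mine):

; illustrative sketch only
define void @addrspace_sketch(i32 addrspace(3)* %lds, i32 addrspace(1)* %global, i32 %in) {
entry:
  %a = atomicrmw add i32 addrspace(3)* %lds, i32 %in seq_cst               ; local (LDS) atomic
  %b = atomicrmw volatile add i32 addrspace(1)* %global, i32 %in seq_cst   ; global atomic, volatile
  ret void
}
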
/external/llvm/test/Transforms/AtomicExpand/X86/
expand-atomic-non-integer.ll
20 ; CHECK: %2 = load atomic i32, i32* %1 seq_cst, align 4
23 %res = load atomic float, float* %ptr seq_cst, align 4
60 ; CHECK: store atomic i32 %1, i32* %2 seq_cst, align 4
61 store atomic float %v, float* %ptr seq_cst, align 4
87 ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
93 cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
115 ; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
121 cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
129 ; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
135 cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
[all …]
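
The CHECK lines in expand-atomic-non-integer.ll describe how AtomicExpand legalizes atomics on non-integer types: the pointer is bitcast to an integer pointer of the same width, the atomic is performed on the integer, and the result is bitcast back. Roughly, for the float load case shown above (function names are mine):

; illustrative sketch only: before expansion
define float @load_float(float* %ptr) {
  %res = load atomic float, float* %ptr seq_cst, align 4
  ret float %res
}
; and after expansion, per the CHECK lines
define float @load_float_expanded(float* %ptr) {
  %1 = bitcast float* %ptr to i32*
  %2 = load atomic i32, i32* %1 seq_cst, align 4
  %res = bitcast i32 %2 to float
  ret float %res
}
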
/external/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/
atomics.ll
3 ; CHECK: DIVERGENT: %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
5 %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
9 ; CHECK: DIVERGENT: %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
11 %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
/external/llvm/test/Bitcode/
atomic.ll
5 cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
6 ; CHECK: cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
8 cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
9 ; CHECK: cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
memInstructions.3.2.ll
95 ; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
96 %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
107 ; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
108 %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
119 ; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
120 %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
131 ; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
132 %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
181 ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
182 store atomic i8 2, i8* %ptr1 seq_cst, align 1
[all …]
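
Two details from the Bitcode tests are worth calling out: atomic.ll shows that cmpxchg takes separate success and failure orderings (seq_cst monotonic means the reload on failure is only monotonic), and memInstructions.3.2.ll shows the singlethread keyword, which restricts the ordering to the current thread (e.g. for signal handlers) instead of all threads. A small sketch with made-up names:

; illustrative sketch only
define i8 @bitcode_sketch(i32* %addr, i32 %desired, i32 %new, i8* %ptr1) {
entry:
  %pair = cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic  ; weaker failure ordering
  %v = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1                   ; single-thread scope
  ret i8 %v
}
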
/external/swiftshader/third_party/LLVM/test/CodeGen/Thumb2/
thumb2-branch.ll
14 fence seq_cst
19 fence seq_cst
31 fence seq_cst
36 fence seq_cst
48 fence seq_cst
53 fence seq_cst
65 fence seq_cst
70 fence seq_cst
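
thumb2-branch.ll exercises standalone fences. fence seq_cst is a full two-way barrier; on ARM/Thumb2 it is normally lowered to a dmb instruction. A minimal publication-style sketch (names are mine, not from the test):

; illustrative sketch only
define void @publish_sketch(i32* %data, i32* %flag) {
entry:
  store i32 42, i32* %data
  fence seq_cst                                       ; keep the data store ahead of the flag store
  store atomic i32 1, i32* %flag monotonic, align 4
  ret void
}
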
/external/llvm/test/CodeGen/ARM/
ldstrex-m.ll
9 %0 = load atomic i64, i64* %p seq_cst, align 8
17 store atomic i64 0, i64* %p seq_cst, align 8
26 %0 = atomicrmw add i64* %p, i64 1 seq_cst
34 %0 = load atomic i32, i32* %p seq_cst, align 4
42 %0 = load atomic i8, i8* %p seq_cst, align 4
50 store atomic i32 0, i32* %p seq_cst, align 4
59 %0 = atomicrmw add i32* %p, i32 1 seq_cst
