
Searched for refs:atomic (results 1 – 25 of 1728), sorted by relevance

Page 1 of 70

/external/clang/test/OpenMP/
atomic_messages.cpp
6 #pragma omp atomic in foo()
14 #pragma omp atomic in foo()
42 #pragma omp atomic read in read()
46 #pragma omp atomic read in read()
50 #pragma omp atomic read in read()
54 #pragma omp atomic read in read()
58 #pragma omp atomic read in read()
63 #pragma omp atomic read read in read()
74 #pragma omp atomic read in read()
78 #pragma omp atomic read in read()
[all …]
atomic_messages.c
6 #pragma omp atomic in foo()
14 #pragma omp atomic in foo()
33 #pragma omp atomic read in readint()
37 #pragma omp atomic read in readint()
41 #pragma omp atomic read in readint()
45 #pragma omp atomic read in readint()
49 #pragma omp atomic read in readint()
52 #pragma omp atomic read read in readint()
61 #pragma omp atomic read read in readS()
72 #pragma omp atomic write in writeint()
[all …]
atomic_update_codegen.cpp
82 #pragma omp atomic in main()
85 #pragma omp atomic in main()
88 #pragma omp atomic update in main()
91 #pragma omp atomic in main()
94 #pragma omp atomic update in main()
112 #pragma omp atomic in main()
127 #pragma omp atomic update in main()
131 #pragma omp atomic in main()
146 #pragma omp atomic update in main()
161 #pragma omp atomic in main()
[all …]
atomic_write_codegen.c
83 #pragma omp atomic write in main()
87 #pragma omp atomic write in main()
91 #pragma omp atomic write in main()
95 #pragma omp atomic write in main()
99 #pragma omp atomic write in main()
103 #pragma omp atomic write in main()
107 #pragma omp atomic write in main()
111 #pragma omp atomic write in main()
115 #pragma omp atomic write in main()
119 #pragma omp atomic write in main()
[all …]
atomic_read_codegen.c
83 #pragma omp atomic read in main()
87 #pragma omp atomic read in main()
91 #pragma omp atomic read in main()
95 #pragma omp atomic read in main()
99 #pragma omp atomic read in main()
103 #pragma omp atomic read in main()
107 #pragma omp atomic read in main()
111 #pragma omp atomic read in main()
115 #pragma omp atomic read in main()
119 #pragma omp atomic read in main()
[all …]
atomic_capture_codegen.cpp
83 #pragma omp atomic capture in main()
88 #pragma omp atomic capture in main()
92 #pragma omp atomic capture in main()
97 #pragma omp atomic capture in main()
116 #pragma omp atomic capture in main()
132 #pragma omp atomic capture in main()
137 #pragma omp atomic capture in main()
153 #pragma omp atomic capture in main()
169 #pragma omp atomic capture in main()
185 #pragma omp atomic capture in main()
[all …]
atomic_ast_print.cpp
13 #pragma omp atomic in foo()
15 #pragma omp atomic read in foo()
17 #pragma omp atomic write in foo()
19 #pragma omp atomic update in foo()
21 #pragma omp atomic capture in foo()
23 #pragma omp atomic capture in foo()
28 #pragma omp atomic seq_cst in foo()
30 #pragma omp atomic read seq_cst in foo()
32 #pragma omp atomic seq_cst write in foo()
34 #pragma omp atomic update seq_cst in foo()
[all …]
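The OpenMP excerpts above exercise the atomic construct in its read, write, update, and capture forms, plus the seq_cst clause. Below is a minimal C++ sketch of the same constructs, assuming an OpenMP-enabled compiler (the variable names are illustrative, not taken from the tests):

    // Minimal sketch of the OpenMP atomic clause forms exercised above.
    // Build with: clang++ -fopenmp atomic_demo.cpp   (file name is illustrative)
    int main() {
      int x = 0;
      #pragma omp parallel num_threads(4)
      {
        int v = 0;                         // per-thread capture target

        #pragma omp atomic                 // default form: atomic update
        x += 1;

        #pragma omp atomic read            // atomically read x into v
        v = x;

        #pragma omp atomic write           // atomically overwrite x
        x = 42;

        #pragma omp atomic update          // explicit update form
        x *= 2;

        #pragma omp atomic capture         // update x and capture the updated value
        v = x += 1;

        #pragma omp atomic update seq_cst  // sequentially consistent update (OpenMP 4.0)
        x -= 1;

        (void)v;                           // silence unused-variable warnings
      }
      return 0;
    }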
/external/guava/guava/src/com/google/common/util/concurrent/
AtomicLongMap.java
28 import java.util.concurrent.atomic.AtomicLong;
82 AtomicLong atomic = map.get(key); in get() local
83 return atomic == null ? 0L : atomic.get(); in get()
106 AtomicLong atomic = map.get(key); in addAndGet() local
107 if (atomic == null) { in addAndGet()
108 atomic = map.putIfAbsent(key, new AtomicLong(delta)); in addAndGet()
109 if (atomic == null) { in addAndGet()
116 long oldValue = atomic.get(); in addAndGet()
119 if (map.replace(key, atomic, new AtomicLong(delta))) { in addAndGet()
127 if (atomic.compareAndSet(oldValue, newValue)) { in addAndGet()
[all …]
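The AtomicLongMap.addAndGet excerpt follows the usual optimistic retry pattern: read the current count, compute the new one, and publish it with compare-and-set, looping if another thread interfered. The same pattern reduced to a single counter, sketched in C++ for consistency with the other examples (Guava's version additionally handles absent and removed map entries):

    #include <atomic>

    // Optimistic compare-and-swap retry loop: observe the current value,
    // compute the update, and install it atomically; on failure the observed
    // value is refreshed and the loop tries again.
    long long add_and_get(std::atomic<long long>& counter, long long delta) {
        long long old_value = counter.load(std::memory_order_relaxed);
        for (;;) {
            long long new_value = old_value + delta;
            // compare_exchange_weak updates old_value with the freshly observed
            // value when it fails, so no separate reload is needed.
            if (counter.compare_exchange_weak(old_value, new_value,
                                              std::memory_order_acq_rel,
                                              std::memory_order_relaxed)) {
                return new_value;
            }
        }
    }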
/external/libdrm/
xf86atomic.h
42 int atomic; member
45 # define atomic_read(x) ((x)->atomic)
46 # define atomic_set(x, val) ((x)->atomic = (val))
47 # define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
48 # define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
49 # define atomic_dec_and_test(x) (__sync_add_and_fetch (&(x)->atomic, -1) == 0)
50 # define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic, (v)))
51 # define atomic_dec(x, v) ((void) __sync_sub_and_fetch(&(x)->atomic, (v)))
52 # define atomic_cmpxchg(x, oldv, newv) __sync_val_compare_and_swap (&(x)->atomic, oldv, newv)
62 AO_t atomic; member
[all …]
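xf86atomic.h wraps a small atomic_t struct around the GCC/Clang __sync builtins, as the macros above show. A hedged usage sketch of that style of API, a reference count whose last decrement frees the object (ref_object, ref_get, and ref_put are hypothetical names, not part of libdrm):

    #include <cstdlib>

    // atomic_t mirrors the struct in xf86atomic.h; the helpers below expand to
    // the same __sync builtins used by atomic_inc / atomic_dec_and_test.
    typedef struct { int atomic; } atomic_t;

    struct ref_object {
        atomic_t refcnt;
        /* ... payload ... */
    };

    static void ref_get(ref_object* o) {
        (void)__sync_fetch_and_add(&o->refcnt.atomic, 1);        // atomic_inc
    }

    static void ref_put(ref_object* o) {
        if (__sync_add_and_fetch(&o->refcnt.atomic, -1) == 0)    // atomic_dec_and_test
            std::free(o);                                        // last reference dropped
    }

    int main() {
        ref_object* o = static_cast<ref_object*>(std::calloc(1, sizeof(ref_object)));
        o->refcnt.atomic = 1;      // atomic_set(&o->refcnt, 1), before the object is shared
        ref_get(o);                // second reference
        ref_put(o);                // drops to 1, object stays alive
        ref_put(o);                // drops to 0, object is freed
        return 0;
    }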
/external/libcxx/test/std/atomics/atomics.types.generic/
integral_typedefs.pass.cpp
46 static_assert((std::is_same<std::atomic<char>, std::atomic_char>::value), ""); in main()
47 static_assert((std::is_same<std::atomic<signed char>, std::atomic_schar>::value), ""); in main()
48 static_assert((std::is_same<std::atomic<unsigned char>, std::atomic_uchar>::value), ""); in main()
49 static_assert((std::is_same<std::atomic<short>, std::atomic_short>::value), ""); in main()
50 static_assert((std::is_same<std::atomic<unsigned short>, std::atomic_ushort>::value), ""); in main()
51 static_assert((std::is_same<std::atomic<int>, std::atomic_int>::value), ""); in main()
52 static_assert((std::is_same<std::atomic<unsigned int>, std::atomic_uint>::value), ""); in main()
53 static_assert((std::is_same<std::atomic<long>, std::atomic_long>::value), ""); in main()
54 static_assert((std::is_same<std::atomic<unsigned long>, std::atomic_ulong>::value), ""); in main()
55 static_assert((std::is_same<std::atomic<long long>, std::atomic_llong>::value), ""); in main()
[all …]
cstdint_typedefs.pass.cpp
45 …static_assert((std::is_same<std::atomic< std::int_least8_t>, std::atomic_int_least8_t>::value),… in main()
46 …static_assert((std::is_same<std::atomic< std::uint_least8_t>, std::atomic_uint_least8_t>::value),… in main()
47 …static_assert((std::is_same<std::atomic< std::int_least16_t>, std::atomic_int_least16_t>::value),… in main()
48 …static_assert((std::is_same<std::atomic<std::uint_least16_t>, std::atomic_uint_least16_t>::value),… in main()
49 …static_assert((std::is_same<std::atomic< std::int_least32_t>, std::atomic_int_least32_t>::value),… in main()
50 …static_assert((std::is_same<std::atomic<std::uint_least32_t>, std::atomic_uint_least32_t>::value),… in main()
51 …static_assert((std::is_same<std::atomic< std::int_least64_t>, std::atomic_int_least64_t>::value),… in main()
52 …static_assert((std::is_same<std::atomic<std::uint_least64_t>, std::atomic_uint_least64_t>::value),… in main()
54 …static_assert((std::is_same<std::atomic< std::int_fast8_t>, std::atomic_int_fast8_t>::value), "… in main()
55 …static_assert((std::is_same<std::atomic< std::uint_fast8_t>, std::atomic_uint_fast8_t>::value), "… in main()
[all …]
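The typedef tests above assert that each atomic_T alias names exactly the specialization std::atomic<T>, both for the built-in integral types and for the <cstdint> typedefs. A small sketch of what that buys in practice, assuming C++11 and a conforming library such as libc++:

    #include <atomic>
    #include <cstdint>
    #include <type_traits>

    // Because the alias and the specialization are the same type, the two
    // spellings can be mixed freely; no conversion is involved.
    static_assert(std::is_same<std::atomic<int>, std::atomic_int>::value, "");

    void bump(std::atomic<int>& c) { c.fetch_add(1, std::memory_order_relaxed); }

    int main() {
        std::atomic_int counter{0};          // same type as std::atomic<int>
        bump(counter);

        std::atomic_uint_least64_t wide{0};  // same type as std::atomic<std::uint_least64_t>
        wide.fetch_add(1);

        return (counter.load() == 1 && wide.load() == 1) ? 0 : 1;
    }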
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
element-atomic-memintrins.ll
11 call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 0, i32 1)
17 ; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 1
19 ; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 1
21 ; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 1
23 ; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 1
24 ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 …
27 call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
28 call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 2, i32 1)
29 call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 4, i32 1)
30 call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 8, i32 1)
[all …]
atomic.ll
6 ; Check transforms involving atomic operations
10 ; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
12 %x = load atomic i32, i32* %p seq_cst, align 4
40 ; Forwarding from a stronger ordered atomic is fine
43 ; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
45 %x = load atomic i32, i32* %p seq_cst, align 4
46 %y = load atomic i32, i32* %p unordered, align 4
51 ; Forwarding from a non-atomic is not. (The earlier load
52 ; could in priciple be promoted to atomic and then forwarded,
53 ; but we can't just drop the atomic from the load.)
[all …]
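The comments in atomic.ll state the forwarding rule these InstCombine tests pin down: a later load with weaker-or-equal atomic ordering may reuse the value of an earlier, stronger atomic load of the same address, while an atomic load may not simply reuse a preceding non-atomic load. A source-level sketch of the allowed case, with relaxed standing in for the IR's unordered ordering (this is an illustration, not the test itself):

    #include <atomic>

    // The second, relaxed load may legally observe the value already obtained
    // by the preceding seq_cst load of the same object, so the compiler is
    // free to forward it (the IR-level transform the test checks). Reusing a
    // plain non-atomic read for an atomic load is not allowed.
    int sum_two_reads(const std::atomic<int>& p) {
        int x = p.load(std::memory_order_seq_cst);
        int y = p.load(std::memory_order_relaxed);  // may be folded to y = x
        return x + y;
    }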
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/NVPTX/
atomics-with-scope.ll
12 %tmp36 = tail call i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
14 %tmp38 = tail call i64 @llvm.nvvm.atomic.add.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
16 %tmp39 = tail call i32 @llvm.nvvm.atomic.add.gen.i.sys.i32.p0i32(i32* %ip, i32 %i)
18 %tmp41 = tail call i64 @llvm.nvvm.atomic.add.gen.i.sys.i64.p0i64(i64* %llp, i64 %ll)
20 %tmp42 = tail call float @llvm.nvvm.atomic.add.gen.f.cta.f32.p0f32(float* %fp, float %f)
22 %tmp43 = tail call double @llvm.nvvm.atomic.add.gen.f.cta.f64.p0f64(double* %dfp, double %df)
24 %tmp44 = tail call float @llvm.nvvm.atomic.add.gen.f.sys.f32.p0f32(float* %fp, float %f)
26 %tmp45 = tail call double @llvm.nvvm.atomic.add.gen.f.sys.f64.p0f64(double* %dfp, double %df)
29 %tmp46 = tail call i32 @llvm.nvvm.atomic.exch.gen.i.cta.i32.p0i32(i32* %ip, i32 %i)
31 %tmp48 = tail call i64 @llvm.nvvm.atomic.exch.gen.i.cta.i64.p0i64(i64* %llp, i64 %ll)
[all …]
/external/llvm/test/Transforms/InstCombine/
atomic.ll
6 ; Check transforms involving atomic operations
10 ; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
12 %x = load atomic i32, i32* %p seq_cst, align 4
40 ; Forwarding from a stronger ordered atomic is fine
43 ; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
45 %x = load atomic i32, i32* %p seq_cst, align 4
46 %y = load atomic i32, i32* %p unordered, align 4
51 ; Forwarding from a non-atomic is not. (The earlier load
52 ; could in priciple be promoted to atomic and then forwarded,
53 ; but we can't just drop the atomic from the load.)
[all …]
/external/llvm/test/Transforms/DeadStoreElimination/
atomic.ll
6 ; Sanity tests for atomic stores.
8 ; atomic ops, just some transformations are not allowed across release-acquire pairs.
21 store atomic i32 0, i32* @y unordered, align 4
29 ; CHECK-NOT: store atomic
31 store atomic i32 0, i32* @x unordered, align 4
36 ; DSE unordered store overwriting non-atomic store (allowed)
39 ; CHECK: store atomic i32 1
41 store atomic i32 1, i32* @x unordered, align 4
45 ; DSE no-op unordered atomic store (allowed)
50 %x = load atomic i32, i32* @x unordered, align 4
[all …]
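The DeadStoreElimination tests above enumerate which atomic stores may be dropped: a store that is overwritten by a later unordered store to the same location with no intervening reader, and a no-op unordered store that writes back a value just loaded; transformations across release/acquire pairs remain forbidden. A C++-level sketch of the two deletable shapes, again using relaxed as the nearest source-level analogue of the IR's unordered ordering:

    #include <atomic>

    std::atomic<int> x{0};   // stand-in for the global @x in the test

    // Case 1: the first relaxed store is dead -- it is overwritten by the
    // second store to the same object with no read in between, so DSE may
    // drop it.
    void overwrite() {
        x.store(0, std::memory_order_relaxed);
        x.store(1, std::memory_order_relaxed);
    }

    // Case 2: a no-op store -- writing back the value just loaded -- may also
    // be eliminated when both accesses are weakly ordered.
    void noop_store() {
        int v = x.load(std::memory_order_relaxed);
        x.store(v, std::memory_order_relaxed);
    }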
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/DeadStoreElimination/
atomic.ll
6 ; Sanity tests for atomic stores.
8 ; atomic ops, just some transformations are not allowed across release-acquire pairs.
21 store atomic i32 0, i32* @y unordered, align 4
29 ; CHECK-NOT: store atomic
31 store atomic i32 0, i32* @x unordered, align 4
36 ; DSE unordered store overwriting non-atomic store (allowed)
39 ; CHECK: store atomic i32 1
41 store atomic i32 1, i32* @x unordered, align 4
45 ; DSE no-op unordered atomic store (allowed)
50 %x = load atomic i32, i32* @x unordered, align 4
[all …]
/external/swiftshader/third_party/subzero/tests_lit/assembler/x86/
jump_encodings.ll
8 ; Use atomic ops as filler, which shouldn't get optimized out.
9 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
10 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
11 declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
18 call void @llvm.nacl.atomic.store.i32(i32 %val, i32* %ptr, i32 6)
21 call void @llvm.nacl.atomic.store.i32(i32 %val, i32* %ptr, i32 6)
42 %tmp = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
45 call void @llvm.nacl.atomic.store.i32(i32 %tmp, i32* %ptr, i32 6)
46 call void @llvm.nacl.atomic.store.i32(i32 %val, i32* %ptr, i32 6)
47 call void @llvm.nacl.atomic.store.i32(i32 %val, i32* %ptr, i32 6)
[all …]
/external/libcxx/include/
atomic
2 //===--------------------------- atomic -----------------------------------===//
15 atomic synopsis
94 struct atomic
123 atomic() noexcept = default;
124 constexpr atomic(T desr) noexcept;
125 atomic(const atomic&) = delete;
126 atomic& operator=(const atomic&) = delete;
127 atomic& operator=(const atomic&) volatile = delete;
133 struct atomic<integral>
180 atomic() noexcept = default;
[all …]
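The <atomic> synopsis excerpted above shows the core of the std::atomic interface: default and constexpr value construction, deleted copy operations, and the load/store/fetch_* members declared further down. A short usage sketch of that interface, a two-thread counter (compile with -pthread; the names are illustrative):

    #include <atomic>
    #include <cassert>
    #include <thread>

    int main() {
        std::atomic<int> hits{0};          // constexpr atomic(T desr) noexcept
        // std::atomic<int> copy = hits;   // ill-formed: atomic(const atomic&) = delete

        std::thread t([&] { for (int i = 0; i < 1000; ++i) hits.fetch_add(1); });
        for (int i = 0; i < 1000; ++i) hits.fetch_add(1);
        t.join();

        assert(hits.load() == 2000);       // every increment is atomic, none are lost
        return 0;
    }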
/external/libcxx/test/std/atomics/atomics.lockfree/
isalwayslockfree.pass.cpp
24 if (std::atomic<T>::is_always_lock_free) in checkAlwaysLockFree()
25 assert(std::atomic<T>().is_lock_free()); in checkAlwaysLockFree()
43 static_assert(std::atomic<LLong>::is_always_lock_free == (2 == ATOMIC_LLONG_LOCK_FREE)); in checkLongLongTypes()
44 static_assert(std::atomic<ULLong>::is_always_lock_free == (2 == ATOMIC_LLONG_LOCK_FREE)); in checkLongLongTypes()
57 static_assert(std::atomic<LLong>::is_always_lock_free == ExpectLockFree, ""); in checkLongLongTypes()
58 static_assert(std::atomic<ULLong>::is_always_lock_free == ExpectLockFree, ""); in checkLongLongTypes()
120 static_assert(std::atomic<bool>::is_always_lock_free == (2 == ATOMIC_BOOL_LOCK_FREE)); in run()
121 static_assert(std::atomic<char>::is_always_lock_free == (2 == ATOMIC_CHAR_LOCK_FREE)); in run()
122 static_assert(std::atomic<signed char>::is_always_lock_free == (2 == ATOMIC_CHAR_LOCK_FREE)); in run()
123 static_assert(std::atomic<unsigned char>::is_always_lock_free == (2 == ATOMIC_CHAR_LOCK_FREE)); in run()
[all …]
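The is_always_lock_free tests tie the C++17 compile-time constant to the corresponding ATOMIC_*_LOCK_FREE macro: the constant is true exactly when the macro is 2 (always lock-free), and in that case every object of the type must also report is_lock_free() at runtime. A minimal sketch of the same check, assuming -std=c++17:

    #include <atomic>
    #include <cassert>
    #include <cstdio>

    int main() {
        // Compile-time: mirrors the static_asserts in the test.
        static_assert(std::atomic<int>::is_always_lock_free == (2 == ATOMIC_INT_LOCK_FREE),
                      "is_always_lock_free tracks the ATOMIC_*_LOCK_FREE macro");

        // Runtime: when the type is always lock-free, every object must report
        // is_lock_free() == true (the checkAlwaysLockFree() pattern above).
        std::atomic<long long> v{0};
        if (std::atomic<long long>::is_always_lock_free)
            assert(v.is_lock_free());

        std::printf("atomic<long long> lock-free: %d\n", static_cast<int>(v.is_lock_free()));
        return 0;
    }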
/external/llvm/test/Transforms/GVN/
atomic.ll
15 store atomic i32 %x, i32* @x unordered, align 4
27 %y = load atomic i32, i32* @x unordered, align 4
39 %x = load atomic i32, i32* @x unordered, align 4
48 ; CHECK: load atomic i32, i32* @x unordered
51 %x2 = load atomic i32, i32* @x unordered, align 4
62 store atomic i32 %x, i32* @x release, align 4
63 %w = load atomic i32, i32* @x acquire, align 4
75 store atomic i32 %x, i32* @x monotonic, align 4
86 %x = load atomic i32, i32* @y unordered, align 4
87 %clobber = load atomic i32, i32* @x monotonic, align 4
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GVN/PRE/
atomic.ll
15 store atomic i32 %x, i32* @x unordered, align 4
27 %y = load atomic i32, i32* @x unordered, align 4
39 %x = load atomic i32, i32* @x unordered, align 4
48 ; CHECK: load atomic i32, i32* @x unordered
51 %x2 = load atomic i32, i32* @x unordered, align 4
62 store atomic i32 %x, i32* @x release, align 4
63 %w = load atomic i32, i32* @x acquire, align 4
75 store atomic i32 %x, i32* @x monotonic, align 4
86 %x = load atomic i32, i32* @y unordered, align 4
87 %clobber = load atomic i32, i32* @x monotonic, align 4
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Verifier/
element-wise-atomic-memory-intrinsics.ll
4 ; CHECK: element size of the element-wise unordered atomic memory intrinsic must be a constant int
5 …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
6 ; CHECK: element size of the element-wise atomic memory intrinsic must be a power of 2
7 …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
9 …; CHECK: constant length must be a multiple of the element size in the element-wise atomic memory …
10 …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 …
13 …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* %P, i8* align 4 %Q, i32 1, i32 1)
15 …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %P, i8* align 4 %Q, i32 …
18 …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* %Q, i32 1, i32 1)
20 …call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 1 %Q, i32 …
[all …]
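The Verifier messages above spell out the argument constraints on the llvm.mem*.element.unordered.atomic intrinsics: the element size must be a constant power of two, a constant length must be a multiple of it, and (as the truncated CHECK lines suggest) each pointer argument must be aligned to at least the element size. Restated as a plain C++ predicate, purely as a sketch of the rules and not LLVM code:

    #include <cstdint>

    // True when the arguments of an element-wise unordered atomic memory
    // intrinsic would satisfy the verifier's checks (alignment rule inferred
    // from the truncated messages above).
    bool element_atomic_args_ok(std::uint64_t length, std::uint32_t element_size,
                                std::uint32_t dest_align, std::uint32_t src_align) {
        if (element_size == 0 || (element_size & (element_size - 1)) != 0)
            return false;                          // element size must be a power of 2
        if (length % element_size != 0)
            return false;                          // constant length must be a multiple of it
        return dest_align >= element_size && src_align >= element_size;
    }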
/external/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/
llvm.amdgcn.buffer.atomic.ll
3 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(
6 %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
11 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.add(
14 %orig = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
19 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(
22 %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
27 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(
30 %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
35 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(
38 %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/
llvm.amdgcn.buffer.atomic.ll
3 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(
6 %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
11 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.add(
14 %orig = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
19 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(
22 %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
27 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(
30 %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
35 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(
38 %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
[all …]
