/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
D | memory-legalizer-atomic-rmw.ll
     12  %val = atomicrmw volatile xchg i32* %out, i32 %in monotonic
     24  %val = atomicrmw volatile xchg i32* %out, i32 %in acquire
     36  %val = atomicrmw volatile xchg i32* %out, i32 %in release
     48  %val = atomicrmw volatile xchg i32* %out, i32 %in acq_rel
     60  %val = atomicrmw volatile xchg i32* %out, i32 %in seq_cst
     72  %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("singlethread") monotonic
     84  %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("singlethread") acquire
     96  %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("singlethread") release
    108  %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("singlethread") acq_rel
    120  %val = atomicrmw volatile xchg i32* %out, i32 %in syncscope("singlethread") seq_cst
    [all …]
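The memory-legalizer test above runs atomicrmw xchg through every IR ordering (monotonic, acquire, release, acq_rel, seq_cst) and again under the "singlethread" syncscope. For reference, the same ordering spectrum is expressed from C11 via atomic_exchange_explicit; the sketch below only illustrates that mapping (LLVM's monotonic corresponds to C11 memory_order_relaxed) and uses invented names; it is not the source these tests were generated from.

    #include <stdatomic.h>

    /* Illustrative helper: swap in a new value under a caller-chosen ordering. */
    static int swap_with_order(_Atomic int *out, int in, memory_order mo) {
        return atomic_exchange_explicit(out, in, mo);
    }

    int demo(_Atomic int *out) {
        int a = swap_with_order(out, 1, memory_order_relaxed); /* ~ monotonic */
        int b = swap_with_order(out, 2, memory_order_acq_rel); /* ~ acq_rel   */
        int c = swap_with_order(out, 3, memory_order_seq_cst); /* ~ seq_cst   */
        return a + b + c;
    }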
/external/compiler-rt/lib/sanitizer_common/
D | sanitizer_atomic_msvc.h
    206  uptr xchg,  in atomic_compare_exchange_strong() argument
    210  (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);  in atomic_compare_exchange_strong()
    219  u16 xchg,  in atomic_compare_exchange_strong() argument
    223  (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);  in atomic_compare_exchange_strong()
    232  u32 xchg,  in atomic_compare_exchange_strong() argument
    236  (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);  in atomic_compare_exchange_strong()
    245  u64 xchg,  in atomic_compare_exchange_strong() argument
    249  (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);  in atomic_compare_exchange_strong()
    259  typename T::Type xchg,  in atomic_compare_exchange_weak() argument
    261  return atomic_compare_exchange_strong(a, cmp, xchg, mo);  in atomic_compare_exchange_weak()
D | sanitizer_atomic_clang.h
    77  typename T::Type xchg,  in atomic_compare_exchange_strong() argument
    81  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);  in atomic_compare_exchange_strong()
    91  typename T::Type xchg,  in atomic_compare_exchange_weak() argument
    93  return atomic_compare_exchange_strong(a, cmp, xchg, mo);  in atomic_compare_exchange_weak()
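Both sanitizer headers above implement atomic_compare_exchange_strong() by delegating to a platform primitive (an Interlocked* intrinsic in the MSVC header, __sync_val_compare_and_swap in the clang one), and atomic_compare_exchange_weak() simply forwards to the strong variant. A minimal C sketch of the clang-flavoured pattern, with invented names rather than the sanitizer_common API:

    #include <stdbool.h>

    typedef struct { volatile unsigned long val; } my_atomic_ulong;

    /* Strong CAS on top of the GCC/Clang builtin: returns true on success;
     * on failure the value actually observed is written back through cmp. */
    static bool my_cas_strong(my_atomic_ulong *a, unsigned long *cmp,
                              unsigned long xchg) {
        unsigned long cmpv = *cmp;
        unsigned long prev = __sync_val_compare_and_swap(&a->val, cmpv, xchg);
        if (prev == cmpv)
            return true;
        *cmp = prev;
        return false;
    }

    /* The weak form may legitimately reuse the strong one, as the header does. */
    static bool my_cas_weak(my_atomic_ulong *a, unsigned long *cmp,
                            unsigned long xchg) {
        return my_cas_strong(a, cmp, xchg);
    }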
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/
D | atomicrmw-xchg-03.ll
     13  %res = atomicrmw xchg i32 *%src, i32 %b seq_cst
     24  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     35  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     46  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     58  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     69  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     80  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     92  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
    105  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
    119  %res = atomicrmw xchg i32 *%src, i32 40000 seq_cst
D | atomicrmw-xchg-04.ll
    13  %res = atomicrmw xchg i64 *%src, i64 %b seq_cst
    24  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    36  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    47  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    59  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    72  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    86  %res = atomicrmw xchg i64 *%ptr, i64 3000000000 seq_cst
/external/llvm/test/CodeGen/SystemZ/
D | atomicrmw-xchg-03.ll
     13  %res = atomicrmw xchg i32 *%src, i32 %b seq_cst
     24  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     35  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     46  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     58  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     69  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     80  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
     92  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
    105  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
    119  %res = atomicrmw xchg i32 *%src, i32 40000 seq_cst
D | atomicrmw-xchg-04.ll
    13  %res = atomicrmw xchg i64 *%src, i64 %b seq_cst
    24  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    36  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    47  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    59  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    72  %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
    86  %res = atomicrmw xchg i64 *%ptr, i64 3000000000 seq_cst
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AVR/atomics/
D | swap.ll
     6  %val = atomicrmw xchg i8* %foo, i8 13 seq_cst
    13  %val = atomicrmw xchg i16* %foo, i16 13 seq_cst
    20  %val = atomicrmw xchg i32* %foo, i32 13 seq_cst
    27  %val = atomicrmw xchg i64* %foo, i64 13 seq_cst
/external/llvm/test/MC/X86/
D | intel-syntax.s
    453  xchg RAX, RCX  label
    454  xchg RCX, RAX  label
    455  xchg EAX, ECX  label
    456  xchg ECX, EAX  label
    457  xchg AX, CX  label
    458  xchg CX, AX  label
    466  xchg RAX, [ECX]  label
    467  xchg [ECX], RAX  label
    468  xchg EAX, [ECX]  label
    469  xchg [ECX], EAX  label
    [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/X86/
D | intel-syntax.s
    515  xchg RAX, RCX  label
    516  xchg RCX, RAX  label
    517  xchg EAX, ECX  label
    518  xchg ECX, EAX  label
    519  xchg AX, CX  label
    520  xchg CX, AX  label
    528  xchg RAX, [ECX]  label
    529  xchg [ECX], RAX  label
    530  xchg EAX, [ECX]  label
    531  xchg [ECX], EAX  label
    [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Assembler/
D | atomic.ll
    30  ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
    31  atomicrmw volatile xchg i32* %x, i32 10 monotonic
    32  ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 syncscope("agent") monotonic
    33  atomicrmw volatile xchg i32* %x, i32 10 syncscope("agent") monotonic
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/WebAssembly/
D | atomic-rmw.ll
     60  ; CHECK: i32.atomic.rmw.xchg $push0=, 0($0), $1{{$}}
     63  %old = atomicrmw xchg i32* %p, i32 %v seq_cst
    118  ; CHECK: i64.atomic.rmw.xchg $push0=, 0($0), $1{{$}}
    121  %old = atomicrmw xchg i64* %p, i64 %v seq_cst
    449  ; xchg
    453  ; CHECK: i32.atomic.rmw8_u.xchg $push0=, 0($0), $1{{$}}
    458  %old = atomicrmw xchg i8* %p, i8 %t seq_cst
    465  ; CHECK: i32.atomic.rmw16_u.xchg $push0=, 0($0), $1{{$}}
    470  %old = atomicrmw xchg i16* %p, i16 %t seq_cst
    477  ; CHECK: i64.atomic.rmw8_u.xchg $push0=, 0($0), $1{{$}}
    [all …]
/external/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/
D | atomics.ll
    3  ; CHECK: DIVERGENT: %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
    5  %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
D | 2006-07-20-InlineAsm.ll
    10  …call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( i32* …
    19  …call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( i32* @G…
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
D | 2006-07-20-InlineAsm.ll
    10  …call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( i32* …
    19  …call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( i32* @G…
/external/llvm/test/CodeGen/X86/
D | 2006-07-20-InlineAsm.ll
    10  …call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( i32* …
    19  …call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( i32* @G…
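All three copies of 2006-07-20-InlineAsm.ll above feed the backend GCC-style inline assembly: an xchg{l} template with "=*m,=*r" outputs and a matching "1" constraint, i.e. a register/memory swap where one operand is both input and output. A C-level sketch of the same idiom follows; it is x86-only, the names are invented for illustration, and it is not what the test itself compiles.

    /* Swap *ptr with val using xchg; with a memory operand, x86 xchg is
     * implicitly locked, so this is an atomic exchange. */
    static unsigned int asm_xchg(volatile unsigned int *ptr, unsigned int val) {
        __asm__ __volatile__("xchgl %0, %1"
                             : "+r"(val), "+m"(*ptr)   /* both operands read-write */
                             :
                             : "memory");
        return val;                                     /* previous value of *ptr */
    }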
/external/linux-kselftest/tools/testing/selftests/powerpc/benchmarks/
D | context_switch.c
    267  static unsigned long xchg(unsigned long *p, unsigned long val)  in xchg() function
    286  c = xchg(m, 2);  in mutex_lock()
    290  c = xchg(m, 2);  in mutex_lock()
    304  else if (xchg(m, 0) == 1)  in mutex_unlock()
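The context_switch.c hits trace the familiar three-state futex mutex (0 = unlocked, 1 = locked, 2 = locked with possible waiters): mutex_lock() swaps 2 into the word and sleeps while the previous value was non-zero, and mutex_unlock() swaps 0 in, skipping the wakeup only when the previous value was 1, so nobody can be waiting. A simplified sketch of that shape using C11 atomics, with the futex calls stubbed out as hypothetical wait_on()/wake_one() hooks; this is not the benchmark's exact code.

    #include <stdatomic.h>

    /* Hypothetical wait/wake hooks; the benchmark uses futex(2) here. */
    static void wait_on(atomic_ulong *m, unsigned long expected) { (void)m; (void)expected; }
    static void wake_one(atomic_ulong *m) { (void)m; }

    /* States: 0 = unlocked, 1 = locked, 2 = locked, waiters possible. */
    static void mutex_lock(atomic_ulong *m) {
        unsigned long c = atomic_exchange(m, 2);   /* mark the lock contended */
        while (c != 0) {
            wait_on(m, 2);                         /* sleep until it may be free */
            c = atomic_exchange(m, 2);
        }
    }

    static void mutex_unlock(atomic_ulong *m) {
        if (atomic_exchange(m, 0) != 1)            /* old value 2: someone may wait */
            wake_one(m);
    }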
/external/llvm/test/CodeGen/PowerPC/
D | atomic-2.ll
    62  %tmp = atomicrmw xchg i64* %mem, i64 1 monotonic
    70  %tmp = atomicrmw xchg i8* %mem, i8 1 monotonic
    78  %tmp = atomicrmw xchg i16* %mem, i16 1 monotonic
/external/swiftshader/third_party/LLVM/test/Assembler/
D | atomic.ll
    19  ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
    20  atomicrmw volatile xchg i32* %x, i32 10 monotonic
/external/u-boot/arch/microblaze/include/asm/
D | system.h
    130  #define xchg(ptr, with) \  macro
    132  #define tas(ptr) (xchg ((ptr), 1))
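The microblaze header defines tas() (test-and-set) directly on top of xchg(ptr, 1): storing 1 and looking at the previous value is already enough to build a spinlock. A minimal C11 sketch of that idea, with invented names rather than the u-boot macros:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* tas(): store 1, report whether the lock was already held. */
    static inline bool my_tas(atomic_int *p) {
        return atomic_exchange(p, 1) != 0;
    }

    static inline void spin_lock(atomic_int *p)   { while (my_tas(p)) { /* spin */ } }
    static inline void spin_unlock(atomic_int *p) { atomic_store(p, 0); }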
/external/boringssl/src/crypto/perlasm/
D | x86asm.pl
    80  &xchg(&HB(@_),&LB(@_));
    82  &xchg(&HB(@_),&LB(@_));
    93  sub ::exch { &xchg(@_); }
/external/swiftshader/third_party/LLVM/test/MC/Disassembler/X86/
D | intel-syntax.txt
    27  # CHECK: xchg EAX, R8D
    30  # CHECK: xchg RAX, R8
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LowerAtomic/
D | atomic-swap.ll
    23  %j = atomicrmw xchg i8* %i, i8 42 monotonic
    34  %j = atomicrmw xchg i8* %i, i8 42 monotonic
/external/u-boot/arch/x86/cpu/
D | lapic.c
    21  #define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \  macro
    62  (void)xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg), v);  in lapic_write()
/external/llvm/test/Assembler/
D | atomic.ll
    24  ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
    25  atomicrmw volatile xchg i32* %x, i32 10 monotonic