; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s

; Note: This test is testing that the lowering for atomics matches what we
; currently emit for non-atomics + the atomic restriction.  The presence of
; particular lowering detail in these tests should not be read as requiring
; that detail for correctness unless it's related to the atomicity itself.
; (Specifically, there were reviewer questions about the lowering for halfs
;  and their calling convention which remain unresolved.)
; Unordered atomic store of half: the value is converted to i16 via the
; __gnu_f2h_ieee libcall and then stored with a plain 16-bit movw.
define void @store_half(half* %fptr, half %v) {
; CHECK-LABEL: @store_half
; CHECK: movq	%rdi, %rbx
; CHECK: callq	__gnu_f2h_ieee
; CHECK: movw	%ax, (%rbx)
  store atomic half %v, half* %fptr unordered, align 2
  ret void
}
; Unordered atomic store of float: moved from %xmm0 to a GPR, then a plain
; 32-bit movl store.
define void @store_float(float* %fptr, float %v) {
; CHECK-LABEL: @store_float
; CHECK: movd	%xmm0, %eax
; CHECK: movl	%eax, (%rdi)
  store atomic float %v, float* %fptr unordered, align 4
  ret void
}
; Unordered atomic store of double: moved from %xmm0 to a GPR, then a plain
; 64-bit movq store.
define void @store_double(double* %fptr, double %v) {
; CHECK-LABEL: @store_double
; CHECK: movd	%xmm0, %rax
; CHECK: movq	%rax, (%rdi)
  store atomic double %v, double* %fptr unordered, align 8
  ret void
}
; Unordered atomic store of fp128: no 16-byte atomic store instruction is
; available here, so it is lowered to the __sync_lock_test_and_set_16 libcall.
define void @store_fp128(fp128* %fptr, fp128 %v) {
; CHECK-LABEL: @store_fp128
; CHECK: callq	__sync_lock_test_and_set_16
  store atomic fp128 %v, fp128* %fptr unordered, align 16
  ret void
}
; Unordered atomic load of half: a plain 16-bit movw load, then conversion to
; float via the __gnu_h2f_ieee libcall.
define half @load_half(half* %fptr) {
; CHECK-LABEL: @load_half
; CHECK: movw	(%rdi), %ax
; CHECK: movzwl	%ax, %edi
; CHECK: callq	__gnu_h2f_ieee
  %v = load atomic half, half* %fptr unordered, align 2
  ret half %v
}
; Unordered atomic load of float: a plain 32-bit movl load into a GPR, then a
; move into %xmm0 for the return.
define float @load_float(float* %fptr) {
; CHECK-LABEL: @load_float
; CHECK: movl	(%rdi), %eax
; CHECK: movd	%eax, %xmm0
  %v = load atomic float, float* %fptr unordered, align 4
  ret float %v
}
; Unordered atomic load of double: a plain 64-bit movq load into a GPR, then a
; move into %xmm0 for the return.
define double @load_double(double* %fptr) {
; CHECK-LABEL: @load_double
; CHECK: movq	(%rdi), %rax
; CHECK: movd	%rax, %xmm0
  %v = load atomic double, double* %fptr unordered, align 8
  ret double %v
}
; Unordered atomic load of fp128: no 16-byte atomic load instruction is
; available here, so it is lowered to the __sync_val_compare_and_swap_16
; libcall.
define fp128 @load_fp128(fp128* %fptr) {
; CHECK-LABEL: @load_fp128
; CHECK: callq	__sync_val_compare_and_swap_16
  %v = load atomic fp128, fp128* %fptr unordered, align 16
  ret fp128 %v
}


; Sanity check the seq_cst lowering since that's the
; interesting one from an ordering perspective on x86.

; seq_cst atomic store of float: the store itself must provide the ordering,
; so an xchgl (implicitly locked) is used instead of a plain movl.
define void @store_float_seq_cst(float* %fptr, float %v) {
; CHECK-LABEL: @store_float_seq_cst
; CHECK: movd	%xmm0, %eax
; CHECK: xchgl	%eax, (%rdi)
  store atomic float %v, float* %fptr seq_cst, align 4
  ret void
}
; seq_cst atomic store of double: the store itself must provide the ordering,
; so an xchgq (implicitly locked) is used instead of a plain movq.
define void @store_double_seq_cst(double* %fptr, double %v) {
; CHECK-LABEL: @store_double_seq_cst
; CHECK: movd	%xmm0, %rax
; CHECK: xchgq	%rax, (%rdi)
  store atomic double %v, double* %fptr seq_cst, align 8
  ret void
}
; seq_cst atomic load of float: same lowering as the unordered case (a plain
; movl load, then a move into %xmm0) — no fence is required on the load side.
define float @load_float_seq_cst(float* %fptr) {
; CHECK-LABEL: @load_float_seq_cst
; CHECK: movl	(%rdi), %eax
; CHECK: movd	%eax, %xmm0
  %v = load atomic float, float* %fptr seq_cst, align 4
  ret float %v
}
; seq_cst atomic load of double: same lowering as the unordered case (a plain
; movq load, then a move into %xmm0) — no fence is required on the load side.
define double @load_double_seq_cst(double* %fptr) {
; CHECK-LABEL: @load_double_seq_cst
; CHECK: movq	(%rdi), %rax
; CHECK: movd	%rax, %xmm0
  %v = load atomic double, double* %fptr seq_cst, align 8
  ret double %v
}