; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-slow-unaligned-mem-16 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+slow-unaligned-mem-16 | FileCheck %s --check-prefix=SLOW

; Verify that the DAGCombiner is creating unaligned 16-byte loads and stores
; if and only if those are fast.

; Two adjacent i64 zero-stores: expect a single unaligned 16-byte store
; (xorps + movups) when unaligned 16-byte ops are fast, and two scalar
; movq stores when they are slow.
define void @merge_const_vec_store(i64* %ptr) {
; FAST-LABEL: merge_const_vec_store:
; FAST:       # BB#0:
; FAST-NEXT:    xorps %xmm0, %xmm0
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_const_vec_store:
; SLOW:       # BB#0:
; SLOW-NEXT:    movq $0, (%rdi)
; SLOW-NEXT:    movq $0, 8(%rdi)
; SLOW-NEXT:    retq

  %idx0 = getelementptr i64, i64* %ptr, i64 0
  %idx1 = getelementptr i64, i64* %ptr, i64 1

  store i64 0, i64* %idx0, align 8
  store i64 0, i64* %idx1, align 8
  ret void
}


; Two adjacent double stores of extracted vector elements: expect one
; unaligned 16-byte store (movups) when fast, and split movlpd/movhpd
; stores when unaligned 16-byte ops are slow.
define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
; FAST-LABEL: merge_vec_element_store:
; FAST:       # BB#0:
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_element_store:
; SLOW:       # BB#0:
; SLOW-NEXT:    movlpd %xmm0, (%rdi)
; SLOW-NEXT:    movhpd %xmm0, 8(%rdi)
; SLOW-NEXT:    retq

  %vecext0 = extractelement <4 x double> %v, i32 0
  %vecext1 = extractelement <4 x double> %v, i32 1

  %idx0 = getelementptr double, double* %ptr, i64 0
  %idx1 = getelementptr double, double* %ptr, i64 1

  store double %vecext0, double* %idx0, align 8
  store double %vecext1, double* %idx1, align 8
  ret void
}


;; TODO: FAST *should* be:
;;    movups (%rdi), %xmm0
;;    movups %xmm0, 40(%rdi)
;; ..but is not currently. See the UseAA FIXME in DAGCombiner.cpp
;; visitSTORE.

; Two adjacent i64 loads forwarded to two adjacent i64 stores.
; Both FAST and SLOW currently emit four scalar movq ops; the TODO
; comment above this function records the desired FAST codegen.
define void @merge_vec_load_and_stores(i64 *%ptr) {
; FAST-LABEL: merge_vec_load_and_stores:
; FAST:       # BB#0:
; FAST-NEXT:    movq (%rdi), %rax
; FAST-NEXT:    movq 8(%rdi), %rcx
; FAST-NEXT:    movq %rax, 40(%rdi)
; FAST-NEXT:    movq %rcx, 48(%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_load_and_stores:
; SLOW:       # BB#0:
; SLOW-NEXT:    movq (%rdi), %rax
; SLOW-NEXT:    movq 8(%rdi), %rcx
; SLOW-NEXT:    movq %rax, 40(%rdi)
; SLOW-NEXT:    movq %rcx, 48(%rdi)
; SLOW-NEXT:    retq

  %idx0 = getelementptr i64, i64* %ptr, i64 0
  %idx1 = getelementptr i64, i64* %ptr, i64 1

  %ld0 = load i64, i64* %idx0, align 4
  %ld1 = load i64, i64* %idx1, align 4

  %idx4 = getelementptr i64, i64* %ptr, i64 5
  %idx5 = getelementptr i64, i64* %ptr, i64 6

  store i64 %ld0, i64* %idx4, align 4
  store i64 %ld1, i64* %idx5, align 4
  ret void
}

