; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx                    | FileCheck %s --check-prefix=AVX

; Although we have the ability to fold an unaligned load with AVX
; and under special conditions with some SSE implementations, we
; cannot fold the load under any circumstances in these test
; cases because they are not 16-byte loads. The load must be
; executed as a scalar ('movs*') with a zero extension to
; 128 bits and then used in the packed logical ('andp*') op.
; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371
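;
; For context: the IR below contains no explicit load instruction; the load
; referred to above is the scalar constant-pool load (of 1.0) that materializes
; the compare result. A rough C equivalent of each test (illustrative only) is:
;
;   double load_double_no_fold(double x, double y) { return x >= y; }
;   float  load_float_no_fold(float x, float y)    { return x >= y; }
;
; The compare produces an all-ones/all-zeros mask, the constant 1.0 is loaded as
; a scalar ('movs*') zero-extended to 128 bits, and the packed logical AND
; ('andp*') of the mask with it yields 1.0 or 0.0.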

define double @load_double_no_fold(double %x, double %y) {
; SSE2-LABEL: load_double_no_fold:
; SSE2:       # %bb.0:
; SSE2-NEXT:    cmplesd %xmm0, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    andpd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_double_no_fold:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %cmp = fcmp oge double %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to double
  ret double %conv
}

define float @load_float_no_fold(float %x, float %y) {
; SSE2-LABEL: load_float_no_fold:
; SSE2:       # %bb.0:
; SSE2-NEXT:    cmpless %xmm0, %xmm1
; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    andps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_float_no_fold:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %cmp = fcmp oge float %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to float
  ret float %conv
}