; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
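; Both extracted lanes are added to constants and stored back to consecutive,
; in-order addresses, so the CHECK lines expect the SLP vectorizer to reuse the
; existing <2 x double> load and emit a single vector fadd, with no scalar
; insertelement/extractelement left behind.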
;CHECK: fextr
;CHECK-NOT: insertelement
;CHECK-NOT: extractelement
;CHECK: fadd <2 x double>
;CHECK: ret void
define void @fextr(double* %ptr) {
entry:
  %LD = load <2 x double>, <2 x double>* undef
  %V0 = extractelement <2 x double> %LD, i32 0
  %V1 = extractelement <2 x double> %LD, i32 1
  %P0 = getelementptr inbounds double, double* %ptr, i64 0
  %P1 = getelementptr inbounds double, double* %ptr, i64 1
  %A0 = fadd double %V0, 0.0
  %A1 = fadd double %V1, 1.1
  store double %A0, double* %P0, align 4
  store double %A1, double* %P1, align 4
  ret void
}

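; Same pattern, but the store addresses are swapped relative to the extracted
; lanes (see the "incorrect order" note below), so the CHECK lines expect the
; operands to be gathered with insertelement rather than the vector load being
; reused directly.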
;CHECK: fextr1
;CHECK: insertelement
;CHECK: insertelement
;CHECK: ret void
define void @fextr1(double* %ptr) {
entry:
  %LD = load <2 x double>, <2 x double>* undef
  %V0 = extractelement <2 x double> %LD, i32 0
  %V1 = extractelement <2 x double> %LD, i32 1
  %P0 = getelementptr inbounds double, double* %ptr, i64 1  ; <--- incorrect order
  %P1 = getelementptr inbounds double, double* %ptr, i64 0
  %A0 = fadd double %V0, 1.2
  %A1 = fadd double %V1, 3.4
  store double %A0, double* %P0, align 4
  store double %A1, double* %P1, align 4
  ret void
}

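; Here the source vector is <4 x double>, wider than the two lanes actually used
; (see the "invalid size" note below), so the CHECK lines again expect a gather
; via insertelement instead of direct reuse of the loaded vector.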
;CHECK: fextr2
;CHECK: insertelement
;CHECK: insertelement
;CHECK: ret void
define void @fextr2(double* %ptr) {
entry:
  %LD = load <4 x double>, <4 x double>* undef
  %V0 = extractelement <4 x double> %LD, i32 0  ; <--- invalid size.
  %V1 = extractelement <4 x double> %LD, i32 1
  %P0 = getelementptr inbounds double, double* %ptr, i64 0
  %P1 = getelementptr inbounds double, double* %ptr, i64 1
  %A0 = fadd double %V0, 5.5
  %A1 = fadd double %V1, 6.6
  store double %A0, double* %P0, align 4
  store double %A1, double* %P1, align 4
  ret void
}