; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -interleaved-access -S | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"

; A wide load whose de-interleaving (stride-2) shuffles are taken from fmuls of
; the load should still be recognized and rewritten as an ld2, with the fmuls
; operating on the extracted sub-vectors.
define <4 x float> @vld2(<8 x float>* %pSrc) {
; CHECK-LABEL: @vld2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x float>* [[PSRC:%.*]] to <4 x float>*
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0v4f32(<4 x float>* [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[L26:%.*]] = fmul <4 x float> [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[L43:%.*]] = fmul <4 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[L6:%.*]] = fadd fast <4 x float> [[L43]], [[L26]]
; CHECK-NEXT:    ret <4 x float> [[L6]]
;
entry:
  %wide.vec = load <8 x float>, <8 x float>* %pSrc, align 4
  %l2 = fmul fast <8 x float> %wide.vec, %wide.vec
  %l3 = shufflevector <8 x float> %l2, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %l4 = fmul fast <8 x float> %wide.vec, %wide.vec
  %l5 = shufflevector <8 x float> %l4, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l6 = fadd fast <4 x float> %l5, %l3
  ret <4 x float> %l6
}

; Same pattern with stride-3 de-interleaving shuffles: the wide load should
; become an ld3.
define <4 x float> @vld3(<12 x float>* %pSrc) {
; CHECK-LABEL: @vld3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <12 x float>* [[PSRC:%.*]] to <4 x float>*
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0v4f32(<4 x float>* [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[L29:%.*]] = fmul <4 x float> [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[L46:%.*]] = fmul <4 x float> [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[L6:%.*]] = fadd fast <4 x float> [[L46]], [[L29]]
; CHECK-NEXT:    [[L73:%.*]] = fmul <4 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[L9:%.*]] = fadd fast <4 x float> [[L6]], [[L73]]
; CHECK-NEXT:    ret <4 x float> [[L9]]
;
entry:
  %wide.vec = load <12 x float>, <12 x float>* %pSrc, align 4
  %l2 = fmul fast <12 x float> %wide.vec, %wide.vec
  %l3 = shufflevector <12 x float> %l2, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
  %l4 = fmul fast <12 x float> %wide.vec, %wide.vec
  %l5 = shufflevector <12 x float> %l4, <12 x float> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
  %l6 = fadd fast <4 x float> %l5, %l3
  %l7 = fmul fast <12 x float> %wide.vec, %wide.vec
  %l8 = shufflevector <12 x float> %l7, <12 x float> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
  %l9 = fadd fast <4 x float> %l6, %l8
  ret <4 x float> %l9
}

; Same pattern with stride-4 de-interleaving shuffles: the wide load should
; become an ld4.
define <4 x float> @vld4(<16 x float>* %pSrc) {
; CHECK-LABEL: @vld4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x float>* [[PSRC:%.*]] to <4 x float>*
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0v4f32(<4 x float>* [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 3
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 3
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 2
; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[L312:%.*]] = fmul <4 x float> [[TMP7]], [[TMP8]]
; CHECK-NEXT:    [[L59:%.*]] = fmul <4 x float> [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[L7:%.*]] = fadd fast <4 x float> [[L59]], [[L312]]
; CHECK-NEXT:    [[L86:%.*]] = fmul <4 x float> [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[L103:%.*]] = fmul <4 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[L12:%.*]] = fadd fast <4 x float> [[L103]], [[L86]]
; CHECK-NEXT:    ret <4 x float> [[L12]]
;
entry:
  %wide.vec = load <16 x float>, <16 x float>* %pSrc, align 4
  %l3 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l4 = shufflevector <16 x float> %l3, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %l5 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l6 = shufflevector <16 x float> %l5, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %l7 = fadd fast <4 x float> %l6, %l4
  %l8 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l9 = shufflevector <16 x float> %l8, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %l10 = fmul fast <16 x float> %wide.vec, %wide.vec
  %l11 = shufflevector <16 x float> %l10, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %l12 = fadd fast <4 x float> %l11, %l9
  ret <4 x float> %l12
}

; Two wide loads whose product feeds the de-interleaving shuffles: both loads
; should be rewritten as ld2s.
define <4 x float> @twosrc(<8 x float>* %pSrc1, <8 x float>* %pSrc2) {
; CHECK-LABEL: @twosrc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x float>* [[PSRC1:%.*]] to <4 x float>*
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0v4f32(<4 x float>* [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x float>* [[PSRC2:%.*]] to <4 x float>*
; CHECK-NEXT:    [[LDN7:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0v4f32(<4 x float>* [[TMP3]])
; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN7]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN7]], 1
; CHECK-NEXT:    [[L46:%.*]] = fmul <4 x float> [[TMP4]], [[TMP2]]
; CHECK-NEXT:    [[L63:%.*]] = fmul <4 x float> [[TMP5]], [[TMP1]]
; CHECK-NEXT:    [[L8:%.*]] = fadd fast <4 x float> [[L63]], [[L46]]
; CHECK-NEXT:    ret <4 x float> [[L8]]
;
entry:
  %wide.vec = load <8 x float>, <8 x float>* %pSrc1, align 4
  %wide.vec26 = load <8 x float>, <8 x float>* %pSrc2, align 4
  %l4 = fmul fast <8 x float> %wide.vec26, %wide.vec
  %l5 = shufflevector <8 x float> %l4, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %l6 = fmul fast <8 x float> %wide.vec26, %wide.vec
  %l7 = shufflevector <8 x float> %l6, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l8 = fadd fast <4 x float> %l7, %l5
  ret <4 x float> %l8
}

; As above, but one lane group is shuffled out of the fmul result while the
; other is shuffled directly from each load; both loads should still become
; ld2s.
define <4 x float> @twosrc2(<8 x float>* %pSrc1, <8 x float>* %pSrc2) {
; CHECK-LABEL: @twosrc2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x float>* [[PSRC1:%.*]] to <4 x float>*
; CHECK-NEXT:    [[LDN:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0v4f32(<4 x float>* [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x float>* [[PSRC2:%.*]] to <4 x float>*
; CHECK-NEXT:    [[LDN4:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0v4f32(<4 x float>* [[TMP3]])
; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN4]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x float>, <4 x float> } [[LDN4]], 1
; CHECK-NEXT:    [[L43:%.*]] = fmul <4 x float> [[TMP4]], [[TMP2]]
; CHECK-NEXT:    [[L6:%.*]] = fmul fast <4 x float> [[TMP5]], [[TMP1]]
; CHECK-NEXT:    [[L8:%.*]] = fadd fast <4 x float> [[L6]], [[L43]]
; CHECK-NEXT:    ret <4 x float> [[L8]]
;
entry:
  %wide.vec = load <8 x float>, <8 x float>* %pSrc1, align 4
  %wide.vec26 = load <8 x float>, <8 x float>* %pSrc2, align 4
  %l4 = fmul fast <8 x float> %wide.vec26, %wide.vec
  %l5 = shufflevector <8 x float> %l4, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %s1 = shufflevector <8 x float> %wide.vec26, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %s2 = shufflevector <8 x float> %wide.vec, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l6 = fmul fast <4 x float> %s1, %s2
  %l8 = fadd fast <4 x float> %l6, %l5
  ret <4 x float> %l8
}