; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=aarch64-unknown-unknown -mcpu=cortex-a53 | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"

; This test is reduced from the matrix multiplication benchmark in the test-suite:
; https://github.com/llvm/llvm-test-suite/tree/master/SingleSource/Benchmarks/Misc/matmul_f64_4x4.c
; The operations here are expected to be vectorized to <2 x double>.
; Otherwise, performance will suffer on Cortex-A53.

; The scalar IR below computes Out[0..7] = A (2x2, row-major) x B (2x4, row-major):
; each Out element is an fadd of two fmuls of scalar loads from A and B, and the
; SLP vectorizer is expected to pair them into <2 x double> loads/fmuls/fadds/stores,
; as pinned by the autogenerated CHECK lines. Do not hand-edit the CHECK lines;
; regenerate them with update_test_checks.py instead.
define void @wrap_mul4(double* nocapture %Out, [2 x double]* nocapture readonly %A, [4 x double]* nocapture readonly %B) {
; CHECK-LABEL: @wrap_mul4(
; CHECK-NEXT: [[ARRAYIDX1_I:%.*]] = getelementptr inbounds [2 x double], [2 x double]* [[A:%.*]], i64 0, i64 0
; CHECK-NEXT: [[TEMP:%.*]] = load double, double* [[ARRAYIDX1_I]], align 8
; CHECK-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B:%.*]], i64 0, i64 0
; CHECK-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds [2 x double], [2 x double]* [[A]], i64 0, i64 1
; CHECK-NEXT: [[TEMP2:%.*]] = load double, double* [[ARRAYIDX5_I]], align 8
; CHECK-NEXT: [[ARRAYIDX7_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B]], i64 1, i64 0
; CHECK-NEXT: [[ARRAYIDX13_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B]], i64 0, i64 1
; CHECK-NEXT: [[TMP1:%.*]] = bitcast double* [[ARRAYIDX3_I]] to <2 x double>*
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x double> undef, double [[TEMP]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[TEMP]], i32 1
; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP4]], [[TMP2]]
; CHECK-NEXT: [[ARRAYIDX18_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B]], i64 1, i64 1
; CHECK-NEXT: [[TMP6:%.*]] = bitcast double* [[ARRAYIDX7_I]] to <2 x double>*
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x double>, <2 x double>* [[TMP6]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> undef, double [[TEMP2]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x double> [[TMP8]], double [[TEMP2]], i32 1
; CHECK-NEXT: [[TMP10:%.*]] = fmul <2 x double> [[TMP9]], [[TMP7]]
; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[TMP5]], [[TMP10]]
; CHECK-NEXT: [[ARRAYIDX25_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B]], i64 0, i64 2
; CHECK-NEXT: [[ARRAYIDX30_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B]], i64 1, i64 2
; CHECK-NEXT: [[ARRAYIDX37_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B]], i64 0, i64 3
; CHECK-NEXT: [[TMP12:%.*]] = bitcast double* [[ARRAYIDX25_I]] to <2 x double>*
; CHECK-NEXT: [[TMP13:%.*]] = load <2 x double>, <2 x double>* [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = fmul <2 x double> [[TMP4]], [[TMP13]]
; CHECK-NEXT: [[ARRAYIDX42_I:%.*]] = getelementptr inbounds [4 x double], [4 x double]* [[B]], i64 1, i64 3
; CHECK-NEXT: [[TMP15:%.*]] = bitcast double* [[ARRAYIDX30_I]] to <2 x double>*
; CHECK-NEXT: [[TMP16:%.*]] = load <2 x double>, <2 x double>* [[TMP15]], align 8
; CHECK-NEXT: [[TMP17:%.*]] = fmul <2 x double> [[TMP9]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = fadd <2 x double> [[TMP14]], [[TMP17]]
; CHECK-NEXT: [[ARRAYIDX47_I:%.*]] = getelementptr inbounds [2 x double], [2 x double]* [[A]], i64 1, i64 0
; CHECK-NEXT: [[TEMP10:%.*]] = load double, double* [[ARRAYIDX47_I]], align 8
; CHECK-NEXT: [[ARRAYIDX52_I:%.*]] = getelementptr inbounds [2 x double], [2 x double]* [[A]], i64 1, i64 1
; CHECK-NEXT: [[TEMP11:%.*]] = load double, double* [[ARRAYIDX52_I]], align 8
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x double> undef, double [[TEMP10]], i32 0
; CHECK-NEXT: [[TMP20:%.*]] = insertelement <2 x double> [[TMP19]], double [[TEMP10]], i32 1
; CHECK-NEXT: [[TMP21:%.*]] = fmul <2 x double> [[TMP2]], [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <2 x double> undef, double [[TEMP11]], i32 0
; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x double> [[TMP22]], double [[TEMP11]], i32 1
; CHECK-NEXT: [[TMP24:%.*]] = fmul <2 x double> [[TMP7]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = fadd <2 x double> [[TMP21]], [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = fmul <2 x double> [[TMP13]], [[TMP20]]
; CHECK-NEXT: [[TMP27:%.*]] = fmul <2 x double> [[TMP16]], [[TMP23]]
; CHECK-NEXT: [[TMP28:%.*]] = fadd <2 x double> [[TMP26]], [[TMP27]]
; CHECK-NEXT: [[RES_I_SROA_4_0_OUT2_I_SROA_IDX2:%.*]] = getelementptr inbounds double, double* [[OUT:%.*]], i64 1
; CHECK-NEXT: [[TMP29:%.*]] = bitcast double* [[OUT]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP11]], <2 x double>* [[TMP29]], align 8
; CHECK-NEXT: [[RES_I_SROA_5_0_OUT2_I_SROA_IDX4:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 2
; CHECK-NEXT: [[RES_I_SROA_6_0_OUT2_I_SROA_IDX6:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 3
; CHECK-NEXT: [[TMP30:%.*]] = bitcast double* [[RES_I_SROA_5_0_OUT2_I_SROA_IDX4]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP18]], <2 x double>* [[TMP30]], align 8
; CHECK-NEXT: [[RES_I_SROA_7_0_OUT2_I_SROA_IDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 4
; CHECK-NEXT: [[RES_I_SROA_8_0_OUT2_I_SROA_IDX10:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 5
; CHECK-NEXT: [[TMP31:%.*]] = bitcast double* [[RES_I_SROA_7_0_OUT2_I_SROA_IDX8]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP25]], <2 x double>* [[TMP31]], align 8
; CHECK-NEXT: [[RES_I_SROA_9_0_OUT2_I_SROA_IDX12:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 6
; CHECK-NEXT: [[RES_I_SROA_10_0_OUT2_I_SROA_IDX14:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 7
; CHECK-NEXT: [[TMP32:%.*]] = bitcast double* [[RES_I_SROA_9_0_OUT2_I_SROA_IDX12]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP28]], <2 x double>* [[TMP32]], align 8
; CHECK-NEXT: ret void
;
  ; Row 0 of the result: Out[0..3] = A[0][0]*B[0][j] + A[0][1]*B[1][j]
  %arrayidx1.i = getelementptr inbounds [2 x double], [2 x double]* %A, i64 0, i64 0
  %temp = load double, double* %arrayidx1.i, align 8
  %arrayidx3.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 0
  %temp1 = load double, double* %arrayidx3.i, align 8
  %mul.i = fmul double %temp, %temp1
  %arrayidx5.i = getelementptr inbounds [2 x double], [2 x double]* %A, i64 0, i64 1
  %temp2 = load double, double* %arrayidx5.i, align 8
  %arrayidx7.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 0
  %temp3 = load double, double* %arrayidx7.i, align 8
  %mul8.i = fmul double %temp2, %temp3
  %add.i = fadd double %mul.i, %mul8.i
  %arrayidx13.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 1
  %temp4 = load double, double* %arrayidx13.i, align 8
  %mul14.i = fmul double %temp, %temp4
  %arrayidx18.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 1
  %temp5 = load double, double* %arrayidx18.i, align 8
  %mul19.i = fmul double %temp2, %temp5
  %add20.i = fadd double %mul14.i, %mul19.i
  %arrayidx25.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 2
  %temp6 = load double, double* %arrayidx25.i, align 8
  %mul26.i = fmul double %temp, %temp6
  %arrayidx30.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 2
  %temp7 = load double, double* %arrayidx30.i, align 8
  %mul31.i = fmul double %temp2, %temp7
  %add32.i = fadd double %mul26.i, %mul31.i
  %arrayidx37.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 3
  %temp8 = load double, double* %arrayidx37.i, align 8
  %mul38.i = fmul double %temp, %temp8
  %arrayidx42.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 3
  %temp9 = load double, double* %arrayidx42.i, align 8
  %mul43.i = fmul double %temp2, %temp9
  %add44.i = fadd double %mul38.i, %mul43.i
  ; Row 1 of the result: Out[4..7] = A[1][0]*B[0][j] + A[1][1]*B[1][j]
  ; (reuses the B loads %temp1/%temp3..%temp9 from above)
  %arrayidx47.i = getelementptr inbounds [2 x double], [2 x double]* %A, i64 1, i64 0
  %temp10 = load double, double* %arrayidx47.i, align 8
  %mul50.i = fmul double %temp1, %temp10
  %arrayidx52.i = getelementptr inbounds [2 x double], [2 x double]* %A, i64 1, i64 1
  %temp11 = load double, double* %arrayidx52.i, align 8
  %mul55.i = fmul double %temp3, %temp11
  %add56.i = fadd double %mul50.i, %mul55.i
  %mul62.i = fmul double %temp4, %temp10
  %mul67.i = fmul double %temp5, %temp11
  %add68.i = fadd double %mul62.i, %mul67.i
  %mul74.i = fmul double %temp6, %temp10
  %mul79.i = fmul double %temp7, %temp11
  %add80.i = fadd double %mul74.i, %mul79.i
  %mul86.i = fmul double %temp8, %temp10
  %mul91.i = fmul double %temp9, %temp11
  %add92.i = fadd double %mul86.i, %mul91.i
  ; Eight contiguous scalar stores to Out[0..7]; expected to become four
  ; <2 x double> stores after SLP vectorization.
  store double %add.i, double* %Out, align 8
  %Res.i.sroa.4.0.Out2.i.sroa_idx2 = getelementptr inbounds double, double* %Out, i64 1
  store double %add20.i, double* %Res.i.sroa.4.0.Out2.i.sroa_idx2, align 8
  %Res.i.sroa.5.0.Out2.i.sroa_idx4 = getelementptr inbounds double, double* %Out, i64 2
  store double %add32.i, double* %Res.i.sroa.5.0.Out2.i.sroa_idx4, align 8
  %Res.i.sroa.6.0.Out2.i.sroa_idx6 = getelementptr inbounds double, double* %Out, i64 3
  store double %add44.i, double* %Res.i.sroa.6.0.Out2.i.sroa_idx6, align 8
  %Res.i.sroa.7.0.Out2.i.sroa_idx8 = getelementptr inbounds double, double* %Out, i64 4
  store double %add56.i, double* %Res.i.sroa.7.0.Out2.i.sroa_idx8, align 8
  %Res.i.sroa.8.0.Out2.i.sroa_idx10 = getelementptr inbounds double, double* %Out, i64 5
  store double %add68.i, double* %Res.i.sroa.8.0.Out2.i.sroa_idx10, align 8
  %Res.i.sroa.9.0.Out2.i.sroa_idx12 = getelementptr inbounds double, double* %Out, i64 6
  store double %add80.i, double* %Res.i.sroa.9.0.Out2.i.sroa_idx12, align 8
  %Res.i.sroa.10.0.Out2.i.sroa_idx14 = getelementptr inbounds double, double* %Out, i64 7
  store double %add92.i, double* %Res.i.sroa.10.0.Out2.i.sroa_idx14, align 8
  ret void
}