; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s

; PR7158
define i32 @test_pr7158() nounwind {
bb.nph55.bb.nph55.split_crit_edge:
  br label %bb3

bb3:                                              ; preds = %bb3, %bb.nph55.bb.nph55.split_crit_edge
  br i1 undef, label %bb.i19, label %bb3

bb.i19:                                           ; preds = %bb.i19, %bb3
  %0 = insertelement <4 x float> undef, float undef, i32 3 ; <<4 x float>> [#uses=3]
  %1 = fmul <4 x float> %0, %0                    ; <<4 x float>> [#uses=1]
  %2 = bitcast <4 x float> %1 to <2 x double>     ; <<2 x double>> [#uses=0]
  %3 = fmul <4 x float> %0, undef                 ; <<4 x float>> [#uses=0]
  br label %bb.i19
}

; Check that the DAG combiner does not arbitrarily modify BUILD_VECTORs
; after legalization.
define void @test_illegal_build_vector() nounwind {
entry:
  store <2 x i64> undef, <2 x i64>* undef, align 16
  %0 = load <16 x i8>* undef, align 16            ; <<16 x i8>> [#uses=1]
  %1 = or <16 x i8> zeroinitializer, %0           ; <<16 x i8>> [#uses=1]
  store <16 x i8> %1, <16 x i8>* undef, align 16
  ret void
}

; Radar 8407927: Make sure that VMOVRRD gets optimized away when the result is
; converted back to be used as a vector type.
; CHECK: test_vmovrrd_combine
define <4 x i32> @test_vmovrrd_combine() nounwind {
entry:
  br i1 undef, label %bb1, label %bb2

bb1:
  %0 = bitcast <2 x i64> zeroinitializer to <2 x double>
  %1 = extractelement <2 x double> %0, i32 0
  %2 = bitcast double %1 to i64
  %3 = insertelement <1 x i64> undef, i64 %2, i32 0
; CHECK-NOT: vmov s
; CHECK: vext.8
  %4 = shufflevector <1 x i64> %3, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
  %tmp2006.3 = bitcast <2 x i64> %4 to <16 x i8>
  %5 = shufflevector <16 x i8> %tmp2006.3, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
  %tmp2004.3 = bitcast <16 x i8> %5 to <4 x i32>
  br i1 undef, label %bb2, label %bb1

bb2:
  %result = phi <4 x i32> [ undef, %entry ], [ %tmp2004.3, %bb1 ]
  ret <4 x i32> %result
}

; Test trying to do a ShiftCombine on illegal types.
; The vector should be split first.
define void @lshrIllegalType(<8 x i32>* %A) nounwind {
  %tmp1 = load <8 x i32>* %A
  %tmp2 = lshr <8 x i32> %tmp1, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
  store <8 x i32> %tmp2, <8 x i32>* %A
  ret void
}

; Test folding a binary vector operation with constant BUILD_VECTOR
; operands with i16 elements.
define void @test_i16_constant_fold() nounwind optsize {
entry:
  %0 = sext <4 x i1> zeroinitializer to <4 x i16>
  %1 = add <4 x i16> %0, zeroinitializer
  %2 = shufflevector <4 x i16> %1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %3 = add <8 x i16> %2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %4 = trunc <8 x i16> %3 to <8 x i8>
  tail call void @llvm.arm.neon.vst1.v8i8(i8* undef, <8 x i8> %4, i32 1)
  unreachable
}

declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind
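; A minimal companion sketch, not part of the original test and not asserted
; by any CHECK lines: the same constant-folding path as @test_i16_constant_fold
; above, but with i8 elements. The function name is illustrative only.
define void @test_i8_constant_fold_example() nounwind optsize {
entry:
  ; "sext undef/zeroinitializer" becomes a constant BUILD_VECTOR, so both
  ; the sext and the add are expected to fold away at the DAG level.
  %0 = sext <8 x i1> zeroinitializer to <8 x i8>
  %1 = add <8 x i8> %0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  tail call void @llvm.arm.neon.vst1.v8i8(i8* undef, <8 x i8> %1, i32 1)
  unreachable
}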
; Test that loads and stores of i64 vector elements are handled as f64 values
; so they are not split up into i32 values. Radar 8755338.
define void @i64_buildvector(i64* %ptr, <2 x i64>* %vp) nounwind {
; CHECK: i64_buildvector
; CHECK: vldr
  %t0 = load i64* %ptr, align 4
  %t1 = insertelement <2 x i64> undef, i64 %t0, i32 0
  store <2 x i64> %t1, <2 x i64>* %vp
  ret void
}

define void @i64_insertelement(i64* %ptr, <2 x i64>* %vp) nounwind {
; CHECK: i64_insertelement
; CHECK: vldr
  %t0 = load i64* %ptr, align 4
  %vec = load <2 x i64>* %vp
  %t1 = insertelement <2 x i64> %vec, i64 %t0, i32 0
  store <2 x i64> %t1, <2 x i64>* %vp
  ret void
}

define void @i64_extractelement(i64* %ptr, <2 x i64>* %vp) nounwind {
; CHECK: i64_extractelement
; CHECK: vstr
  %vec = load <2 x i64>* %vp
  %t1 = extractelement <2 x i64> %vec, i32 0
  store i64 %t1, i64* %ptr
  ret void
}

; Test trying to do an AND combine on illegal types.
define void @andVec(<3 x i8>* %A) nounwind {
  %tmp = load <3 x i8>* %A, align 4
  %and = and <3 x i8> %tmp, <i8 7, i8 7, i8 7>
  store <3 x i8> %and, <3 x i8>* %A
  ret void
}

; Test trying to do an OR combine on illegal types.
define void @orVec(<3 x i8>* %A) nounwind {
  %tmp = load <3 x i8>* %A, align 4
  %or = or <3 x i8> %tmp, <i8 7, i8 7, i8 7>
  store <3 x i8> %or, <3 x i8>* %A
  ret void
}

; The following test was hitting an assertion in the DAG combiner when
; constant folding the multiply, because the "sext undef" was translated to
; a BUILD_VECTOR with i32 0 operands, which did not match the i16 operands
; of the other BUILD_VECTOR.
define i16 @foldBuildVectors() {
  %1 = sext <8 x i8> undef to <8 x i16>
  %2 = mul <8 x i16> %1, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
  %3 = extractelement <8 x i16> %2, i32 0
  ret i16 %3
}

; Test that we generate vrev and vext for reverse shuffles of v8i16 vectors.
; CHECK: reverse_v8i16
define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
  %v0 = load <8 x i16>* %loadaddr
  ; CHECK: vrev64.16
  ; CHECK: vext.16
  %v1 = shufflevector <8 x i16> %v0, <8 x i16> undef,
              <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  store <8 x i16> %v1, <8 x i16>* %storeaddr
  ret void
}

; Test that we generate vrev and vext for reverse shuffles of v16i8 vectors.
; CHECK: reverse_v16i8
define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
  %v0 = load <16 x i8>* %loadaddr
  ; CHECK: vrev64.8
  ; CHECK: vext.8
  %v1 = shufflevector <16 x i8> %v0, <16 x i8> undef,
              <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8,
                          i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  store <16 x i8> %v1, <16 x i8>* %storeaddr
  ret void
}
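; Illustrative companion to the two reverse-shuffle tests above (assumed
; lowering, no CHECK lines; the function name is hypothetical): a full
; reverse of a <4 x i32> vector is expected to lower the same way, to a
; vrev64.32 of each half plus a vext, rather than to scalar element moves.
define void @reverse_v4i32_example(<4 x i32>* %loadaddr, <4 x i32>* %storeaddr) {
  %v0 = load <4 x i32>* %loadaddr
  %v1 = shufflevector <4 x i32> %v0, <4 x i32> undef,
              <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  store <4 x i32> %v1, <4 x i32>* %storeaddr
  ret void
}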
; <rdar://problem/14170854>.
; vldr cannot handle unaligned loads.
; Fall back to vld1.32, which can, instead of using general-purpose loads
; followed by a costly sequence of instructions to build the vector register.
; CHECK: t3
; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}
; CHECK: vld1.32 {[[REG]][1]}
; CHECK: vmull.u8 q{{[0-9]+}}, [[REG]], [[REG]]
define <8 x i16> @t3(i8 zeroext %xf, i8* nocapture %sp0, i8* nocapture %sp1, i32* nocapture %outp) {
entry:
  %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
  %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
  %pix_sp1.0.cast = bitcast i8* %sp1 to i32*
  %pix_sp1.0.copyload = load i32* %pix_sp1.0.cast, align 1
  %vecinit = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
  %vecinit1 = insertelement <2 x i32> %vecinit, i32 %pix_sp1.0.copyload, i32 1
  %0 = bitcast <2 x i32> %vecinit1 to <8 x i8>
  %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %0, <8 x i8> %0)
  ret <8 x i16> %vmull.i
}

; Function Attrs: nounwind readnone
declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>)

; Check that (insert_vector_elt (load)) => (vector_load),
; i.e. that scalar_to_vector does not interfere with that combine.
define <8 x i16> @t4(i8* nocapture %sp0) {
; CHECK: t4
; CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r0]
entry:
  %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
  %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
  %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
  %0 = bitcast <2 x i32> %vec to <8 x i8>
  %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %0, <8 x i8> %0)
  ret <8 x i16> %vmull.i
}

; Make sure a vector load is used for all three loads.
; Lowering to a build vector was breaking the single-use property of the load
; of %pix_sp0.0.copyload.
; CHECK: t5
; CHECK: vld1.32 {[[REG1:d[0-9]+]][1]}, [r0]
; CHECK: vorr [[REG2:d[0-9]+]], [[REG1]], [[REG1]]
; CHECK: vld1.32 {[[REG1]][0]}, [r1]
; CHECK: vld1.32 {[[REG2]][0]}, [r2]
; CHECK: vmull.u8 q{{[0-9]+}}, [[REG1]], [[REG2]]
define <8 x i16> @t5(i8* nocapture %sp0, i8* nocapture %sp1, i8* nocapture %sp2) {
entry:
  %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
  %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
  %pix_sp1.0.cast = bitcast i8* %sp1 to i32*
  %pix_sp1.0.copyload = load i32* %pix_sp1.0.cast, align 1
  %pix_sp2.0.cast = bitcast i8* %sp2 to i32*
  %pix_sp2.0.copyload = load i32* %pix_sp2.0.cast, align 1
  %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 1
  %vecinit1 = insertelement <2 x i32> %vec, i32 %pix_sp1.0.copyload, i32 0
  %vecinit2 = insertelement <2 x i32> %vec, i32 %pix_sp2.0.copyload, i32 0
  %0 = bitcast <2 x i32> %vecinit1 to <8 x i8>
  %1 = bitcast <2 x i32> %vecinit2 to <8 x i8>
  %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %0, <8 x i8> %1)
  ret <8 x i16> %vmull.i
}
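; A hedged extra sketch of the combine exercised by @t4, with 16-bit elements
; (assumed lowering, no CHECK lines; the function name is hypothetical): an
; i16 load inserted into lane 0 of an undef vector should likewise be turned
; into a single lane load rather than a general-purpose load plus a vmov.
define <4 x i16> @t4_i16_example(i16* nocapture %sp0) {
entry:
  %load = load i16* %sp0, align 2
  %vec = insertelement <4 x i16> undef, i16 %load, i32 0
  ret <4 x i16> %vec
}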