; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

define <8 x i8> @vld1dupi8(i8* %A) nounwind {
;CHECK: vld1dupi8:
;Check the (default) alignment value.
;CHECK: vld1.8 {d16[]}, [r0]
	%tmp1 = load i8* %A, align 8
	%tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0
	%tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <8 x i32> zeroinitializer
	ret <8 x i8> %tmp3
}

define <4 x i16> @vld1dupi16(i16* %A) nounwind {
;CHECK: vld1dupi16:
;Check the alignment value.  Max for this instruction is 16 bits:
;CHECK: vld1.16 {d16[]}, [r0, :16]
	%tmp1 = load i16* %A, align 8
	%tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
	%tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer
	ret <4 x i16> %tmp3
}

define <2 x i32> @vld1dupi32(i32* %A) nounwind {
;CHECK: vld1dupi32:
;Check the alignment value.  Max for this instruction is 32 bits:
;CHECK: vld1.32 {d16[]}, [r0, :32]
	%tmp1 = load i32* %A, align 8
	%tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
	%tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
	ret <2 x i32> %tmp3
}

define <2 x float> @vld1dupf(float* %A) nounwind {
;CHECK: vld1dupf:
;CHECK: vld1.32 {d16[]}, [r0]
	%tmp0 = load float* %A
	%tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
	ret <2 x float> %tmp2
}

define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
;CHECK: vld1dupQi8:
;Check the (default) alignment value.
;CHECK: vld1.8 {d16[], d17[]}, [r0]
	%tmp1 = load i8* %A, align 8
	%tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32 0
	%tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <16 x i32> zeroinitializer
	ret <16 x i8> %tmp3
}

define <4 x float> @vld1dupQf(float* %A) nounwind {
;CHECK: vld1dupQf:
;CHECK: vld1.32 {d16[], d17[]}, [r0]
	%tmp0 = load float* %A
	%tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
	%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
	ret <4 x float> %tmp2
}

%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int4x16x2_t = type { <4 x i16>, <4 x i16> }
%struct.__neon_int2x32x2_t = type { <2 x i32>, <2 x i32> }

define <8 x i8> @vld2dupi8(i8* %A) nounwind {
;CHECK: vld2dupi8:
;Check the (default) alignment value.
;CHECK: vld2.8 {d16[], d17[]}, [r0]
	%tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
	%tmp1 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 0
	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 1
	%tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
	%tmp5 = add <8 x i8> %tmp2, %tmp4
	ret <8 x i8> %tmp5
}

define <4 x i16> @vld2dupi16(i16* %A) nounwind {
;CHECK: vld2dupi16:
;Check that a power-of-two alignment smaller than the total size of the memory
;being loaded is ignored.
;CHECK: vld2.16 {d16[], d17[]}, [r0]
	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
	%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
	%tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp5 = add <4 x i16> %tmp2, %tmp4
	ret <4 x i16> %tmp5
}

;Check for a post-increment updating load.
define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
;CHECK: vld2dupi16_update:
;CHECK: vld2.16 {d16[], d17[]}, [r1]!
	%A = load i16** %ptr
	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
	%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
	%tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp5 = add <4 x i16> %tmp2, %tmp4
	%tmp6 = getelementptr i16* %A, i32 2
	store i16* %tmp6, i16** %ptr
	ret <4 x i16> %tmp5
}

define <2 x i32> @vld2dupi32(i32* %A) nounwind {
;CHECK: vld2dupi32:
;Check the alignment value.  Max for this instruction is 64 bits:
;CHECK: vld2.32 {d16[], d17[]}, [r0, :64]
	%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
	%tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0
	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 1
	%tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer
	%tmp5 = add <2 x i32> %tmp2, %tmp4
	ret <2 x i32> %tmp5
}

declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly

%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }

;Check for a post-increment updating load with register increment.
define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
;CHECK: vld3dupi8_update:
;CHECK: vld3.8 {d16[], d17[], d18[]}, [r2], r1
	%A = load i8** %ptr
	%tmp0 = tail call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 8)
	%tmp1 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 0
	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 1
	%tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
	%tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 2
	%tmp6 = shufflevector <8 x i8> %tmp5, <8 x i8> undef, <8 x i32> zeroinitializer
	%tmp7 = add <8 x i8> %tmp2, %tmp4
	%tmp8 = add <8 x i8> %tmp7, %tmp6
	%tmp9 = getelementptr i8* %A, i32 %inc
	store i8* %tmp9, i8** %ptr
	ret <8 x i8> %tmp8
}

define <4 x i16> @vld3dupi16(i16* %A) nounwind {
;CHECK: vld3dupi16:
;Check the (default) alignment value. VLD3 does not support alignment.
;CHECK: vld3.16 {d16[], d17[], d18[]}, [r0]
	%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
	%tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0
	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 1
	%tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 2
	%tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp7 = add <4 x i16> %tmp2, %tmp4
	%tmp8 = add <4 x i16> %tmp7, %tmp6
	ret <4 x i16> %tmp8
}

declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly

%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }

;Check for a post-increment updating load.
define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
;CHECK: vld4dupi16_update:
;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
	%A = load i16** %ptr
	%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
	%tmp1 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 0
	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 1
	%tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 2
	%tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp7 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 3
	%tmp8 = shufflevector <4 x i16> %tmp7, <4 x i16> undef, <4 x i32> zeroinitializer
	%tmp9 = add <4 x i16> %tmp2, %tmp4
	%tmp10 = add <4 x i16> %tmp6, %tmp8
	%tmp11 = add <4 x i16> %tmp9, %tmp10
	%tmp12 = getelementptr i16* %A, i32 4
	store i16* %tmp12, i16** %ptr
	ret <4 x i16> %tmp11
}

define <2 x i32> @vld4dupi32(i32* %A) nounwind {
;CHECK: vld4dupi32:
;Check the alignment value.  An 8-byte alignment is allowed here even though
;it is smaller than the total size of the memory being loaded.
;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0, :64]
	%tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
	%tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0
	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
	%tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 1
	%tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer
	%tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 2
	%tmp6 = shufflevector <2 x i32> %tmp5, <2 x i32> undef, <2 x i32> zeroinitializer
	%tmp7 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 3
	%tmp8 = shufflevector <2 x i32> %tmp7, <2 x i32> undef, <2 x i32> zeroinitializer
	%tmp9 = add <2 x i32> %tmp2, %tmp4
	%tmp10 = add <2 x i32> %tmp6, %tmp8
	%tmp11 = add <2 x i32> %tmp9, %tmp10
	ret <2 x i32> %tmp11
}

declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly