; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -verify-machineinstrs
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -verify-machineinstrs -O0
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"

; This function would crash LiveIntervalAnalysis by creating a chain of 4 INSERT_SUBREGs of the same register.
define arm_apcscc void @NEON_vst4q_u32(i32* nocapture %sp0, i32* nocapture %sp1, i32* nocapture %sp2, i32* nocapture %sp3, i32* %dp) nounwind {
entry:
  %0 = bitcast i32* %sp0 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
  %1 = load <4 x i32>* %0, align 16               ; <<4 x i32>> [#uses=1]
  %2 = bitcast i32* %sp1 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
  %3 = load <4 x i32>* %2, align 16               ; <<4 x i32>> [#uses=1]
  %4 = bitcast i32* %sp2 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
  %5 = load <4 x i32>* %4, align 16               ; <<4 x i32>> [#uses=1]
  %6 = bitcast i32* %sp3 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
  %7 = load <4 x i32>* %6, align 16               ; <<4 x i32>> [#uses=1]
  %8 = bitcast i32* %dp to i8*                    ; <i8*> [#uses=1]
  tail call void @llvm.arm.neon.vst4.v4i32(i8* %8, <4 x i32> %1, <4 x i32> %3, <4 x i32> %5, <4 x i32> %7, i32 1)
  ret void
}

declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind

@sbuf = common global [16 x i32] zeroinitializer, align 16 ; <[16 x i32]*> [#uses=5]
@dbuf = common global [16 x i32] zeroinitializer            ; <[16 x i32]*> [#uses=2]

; This function creates 4 chained INSERT_SUBREGs and then invokes the register scavenger.
; The first INSERT_SUBREG needs an <undef> use operand for that to work.
define arm_apcscc i32 @main() nounwind {
bb.nph:
  br label %bb

bb:                                               ; preds = %bb, %bb.nph
  %0 = phi i32 [ 0, %bb.nph ], [ %1, %bb ]        ; <i32> [#uses=4]
  %scevgep = getelementptr [16 x i32]* @sbuf, i32 0, i32 %0  ; <i32*> [#uses=1]
  %scevgep5 = getelementptr [16 x i32]* @dbuf, i32 0, i32 %0 ; <i32*> [#uses=1]
  store i32 %0, i32* %scevgep, align 4
  store i32 -1, i32* %scevgep5, align 4
  %1 = add nsw i32 %0, 1                          ; <i32> [#uses=2]
  %exitcond = icmp eq i32 %1, 16                  ; <i1> [#uses=1]
  br i1 %exitcond, label %bb2, label %bb

bb2:                                              ; preds = %bb
  %2 = load <4 x i32>* bitcast ([16 x i32]* @sbuf to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
  %3 = load <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32]* @sbuf, i32 0, i32 4) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
  %4 = load <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32]* @sbuf, i32 0, i32 8) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
  %5 = load <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32]* @sbuf, i32 0, i32 12) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
  tail call void @llvm.arm.neon.vst4.v4i32(i8* bitcast ([16 x i32]* @dbuf to i8*), <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, <4 x i32> %5, i32 1) nounwind
  ret i32 0
}

; PR12389
; Make sure the DPair register class can spill.
define void @pr12389(i8* %p) nounwind ssp {
entry:
  %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %p, i32 1)
  tail call void asm sideeffect "", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15}"() nounwind
  tail call void @llvm.arm.neon.vst1.v4f32(i8* %p, <4 x float> %vld1, i32 1)
  ret void
}

declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly

declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind

; <rdar://problem/11101911>
; When an strd is expanded into two str instructions, make sure the first str
; doesn't kill the base register. This can happen if the base register is the
; same as the data register.
%class = type { i8*, %class*, i32 }

define void @f11101911(%class* %this, i32 %num) ssp align 2 {
entry:
  %p1 = getelementptr inbounds %class* %this, i32 0, i32 1
  %p2 = getelementptr inbounds %class* %this, i32 0, i32 2
  tail call void asm sideeffect "", "~{r1},~{r3},~{r5},~{r11},~{r13}"() nounwind
  store %class* %this, %class** %p1, align 4
  store i32 %num, i32* %p2, align 4
  ret void
}

; Check RAFast handling of inline assembly with many dense clobbers.
; The large tuple aliases of the vector registers can cause problems.
define void @rdar13249625(double* nocapture %p) nounwind {
  %1 = tail call double asm sideeffect "@ $0", "=w,~{d0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15}"() nounwind
  store double %1, double* %p, align 4
  ret void
}