/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | 2010-05-20-NEONSpillCrash.ll |
      4  ; the @llvm.arm.neon.vld3.v8i8.p0i8 defined three parts of a register.
      8  declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8*, i32) nounwind readonly
     10  declare void @llvm.arm.neon.vst3.p0i8.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
     13  …%tmp1b = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A2, i32 1) ; <%struct._…
     16  …%tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A4, i32 1) ; <%struct._…
     19  …%tmp1e = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A5, i32 1) ; <%struct._…
     21  …%tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A6, i32 1) ; <%struct._…
     23  …%tmp1g = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A7, i32 1) ; <%struct._…
     26  …%tmp1h = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A8, i32 1) ; <%struct._…
     33  …call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* %A1, <8 x i8> %tmp4abcd, <8 x i8> zeroinitializer, <8 …
    [all …]
|
D | pr45824.ll |
     23  %v6 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld1x2.v8i8.p0i8(i8* %v4)
     25  %v8 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld1x2.v8i8.p0i8(i8* %v7)
     26  …tail call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> u…
     49  %v6 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld1x3.v8i8.p0i8(i8* %v4)
     51  %v8 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld1x3.v8i8.p0i8(i8* %v7)
     52  …tail call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> u…
     75  …%v6 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld1x4.v8i8.p0i8(i8* %v4)
     77  …%v8 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld1x4.v8i8.p0i8(i8* %v7)
     78  …tail call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> u…
     82  declare void @llvm.arm.neon.vst3.p0i8.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
    [all …]
|
D | arm-matmul.ll |
     27  define <2 x i32> @usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
     29  ; CHECK-LABEL: usdot.v2i32.v8i8
     31  …%vusdot1.i = tail call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i…
     35  define <2 x i32> @usdot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
     37  ; CHECK-LABEL: usdot_lane.v2i32.v8i8
     42  …%vusdot1.i = tail call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i…
     46  define <2 x i32> @sudot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
     48  ; CHECK-LABEL: sudot_lane.v2i32.v8i8
     53  …%vusdot1.i = tail call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %1, <8 x i…
     82  declare <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>) #2
|
D | vqshrn.ll |
      7  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
     31  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
     55  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
     75  declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     79  declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     83  declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     91  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    115  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    139  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8,…
    159  declare <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    [all …]
|
D | neon-dot-product.ll |
      3  declare <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
      5  declare <2 x i32> @llvm.arm.neon.sdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
     12  …%vdot1.i = call <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
     28  …%vdot1.i = call <2 x i32> @llvm.arm.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
     47  …%vdot1.i = call <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cas…
     69  …%vdot1.i = call <2 x i32> @llvm.arm.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cas…
|
D | vpminmax.ll |
      8  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
     35  %tmp3 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
     66  declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
     70  declare <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
     81  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    108  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    139  declare <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    143  declare <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
|
/external/llvm/test/CodeGen/ARM/ |
D | 2010-05-20-NEONSpillCrash.ll |
      4  ; the @llvm.arm.neon.vld3.v8i8.p0i8 defined three parts of a register.
      8  declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8*, i32) nounwind readonly
     10  declare void @llvm.arm.neon.vst3.p0i8.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
     13  …%tmp1b = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A2, i32 1) ; <%struct._…
     16  …%tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A4, i32 1) ; <%struct._…
     19  …%tmp1e = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A5, i32 1) ; <%struct._…
     21  …%tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A6, i32 1) ; <%struct._…
     23  …%tmp1g = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A7, i32 1) ; <%struct._…
     26  …%tmp1h = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A8, i32 1) ; <%struct._…
     33  …call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* %A1, <8 x i8> %tmp4abcd, <8 x i8> zeroinitializer, <8 …
    [all …]
|
D | vqshrn.ll |
      7  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
     31  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
     55  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
     75  declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     79  declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     83  declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     91  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    115  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    139  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8,…
    159  declare <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    [all …]
|
D | vpminmax.ll |
      8  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
     35  %tmp3 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
     66  declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
     70  declare <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
     81  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    108  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    139  declare <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    143  declare <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
|
/external/llvm-project/llvm/test/Analysis/CostModel/AArch64/ |
D | vector-reduce.ll |
      4  ; COST-LABEL: add.i8.v8i8
      5  …ound an estimated cost of 1 for instruction: %r = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %v)
      6  ; CODE-LABEL: add.i8.v8i8
      8  define i8 @add.i8.v8i8(<8 x i8> %v) {
      9  %r = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %v)
     49  ; COST-LABEL: umin.i8.v8i8
     50  …d an estimated cost of 216 for instruction: %r = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v)
     51  ; CODE-LABEL: umin.i8.v8i8
     53  define i8 @umin.i8.v8i8(<8 x i8> %v) {
     54  %r = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v)
    [all …]
|
D | min-max.ll |
      4  ; COST-LABEL: umin.v8i8
      5  …an estimated cost of 1 for instruction: %res = call <8 x i8> @llvm.umin.v8i8(<8 x i8> %v0, <8 x …
      7  ; CODE-LABEL: umin.v8i8
     12  declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>)
     13  define <8 x i8> @umin.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
     14  %res = call <8 x i8> @llvm.umin.v8i8(<8 x i8> %v0, <8 x i8> %v1)
    132  ; COST-LABEL: smin.v8i8
    133  …an estimated cost of 1 for instruction: %res = call <8 x i8> @llvm.smin.v8i8(<8 x i8> %v0, <8 x …
    135  ; CODE-LABEL: smin.v8i8
    140  declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>)
    [all …]
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bad-reduction.ll |
      4  %v8i8 = type { i8, i8, i8, i8, i8, i8, i8, i8 }
      8  define i64 @load_bswap(%v8i8* %p) {
     10  ; CHECK-NEXT: [[G0:%.*]] = getelementptr inbounds [[V8I8:%.*]], %v8i8* [[P:%.*]], i64 0, i32 0
     11  ; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [[V8I8]], %v8i8* [[P]], i64 0, i32 1
     12  ; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [[V8I8]], %v8i8* [[P]], i64 0, i32 2
     13  ; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [[V8I8]], %v8i8* [[P]], i64 0, i32 3
     14  ; CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds [[V8I8]], %v8i8* [[P]], i64 0, i32 4
     15  ; CHECK-NEXT: [[G5:%.*]] = getelementptr inbounds [[V8I8]], %v8i8* [[P]], i64 0, i32 5
     16  ; CHECK-NEXT: [[G6:%.*]] = getelementptr inbounds [[V8I8]], %v8i8* [[P]], i64 0, i32 6
     17  ; CHECK-NEXT: [[G7:%.*]] = getelementptr inbounds [[V8I8]], %v8i8* [[P]], i64 0, i32 7
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | arm64-tbl.ll |
      6  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %A, <8 x i8> %B)
     20  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C)
     34  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x …
     48  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x…
     59  declare <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8>, <8 x i8>) nounwind readnone
     61  declare <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
     63  declare <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind r…
     65  declare <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)…
     71  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C)
     85  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i…
    [all …]
|
D | arm64-copy-tuple.ll |
     16  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
     20  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     23  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     33  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
     37  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     40  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     50  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
     54  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     57  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     67  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    [all …]
|
D | aarch64-matmul.ll |
     27  define <2 x i32> @usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
     29  ; CHECK-LABEL: usdot.v2i32.v8i8
     31  …%vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8…
     35  define <2 x i32> @usdot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
     37  ; CHECK-LABEL: usdot_lane.v2i32.v8i8
     42  …%vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8…
     46  define <2 x i32> @sudot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
     48  ; CHECK-LABEL: sudot_lane.v2i32.v8i8
     53  …%vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %1, <8…
     64  …%vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8…
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-tbl.ll |
      6  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %A, <8 x i8> %B)
     20  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C)
     34  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x …
     48  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x…
     59  declare <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8>, <8 x i8>) nounwind readnone
     61  declare <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
     63  declare <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind r…
     65  declare <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)…
     71  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C)
     85  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i…
    [all …]
|
D | arm64-copy-tuple.ll |
     16  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
     20  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     23  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     33  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
     37  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     40  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     50  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
     54  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     57  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
     67  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    [all …]
|
/external/clang/test/CodeGen/ |
D | systemz-abi-vector.c |
     20  typedef __attribute__((vector_size(8))) char v8i8;   typedef
     54  v8i8 pass_v8i8(v8i8 arg) { return arg; }   in pass_v8i8()
    148  struct agg_v8i8 { v8i8 a; };
    285  v8i8 va_v8i8(__builtin_va_list l) { return __builtin_va_arg(l, v8i8); }   in va_v8i8()
|
/external/llvm-project/clang/test/CodeGen/SystemZ/ |
D | systemz-abi-vector.c |
     30  typedef __attribute__((vector_size(8))) char v8i8;   typedef
     64  v8i8 pass_v8i8(v8i8 arg) { return arg; }   in pass_v8i8()
    158  struct agg_v8i8 { v8i8 a; };
    295  v8i8 va_v8i8(__builtin_va_list l) { return __builtin_va_arg(l, v8i8); }   in va_v8i8()
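The two SystemZ ABI tests above revolve around one GNU C vector typedef. The stand-alone sketch below is illustrative only and is not copied from the test file (the helper pass_agg_v8i8 in particular is an invented name): it shows an 8-byte char vector, the C-level counterpart of the IR type <8 x i8>, passed by value both bare and wrapped in a single-element aggregate, which is the distinction these ABI tests exercise.

    /* Illustrative sketch: an 8-element char vector via the GNU
       vector_size extension; the C analogue of LLVM's v8i8 type. */
    typedef __attribute__((vector_size(8))) char v8i8;

    /* Bare vector passed and returned by value. */
    v8i8 pass_v8i8(v8i8 arg) { return arg; }

    /* Hypothetical helper: a single-element aggregate wrapping the vector;
       an ABI may classify this differently from the bare vector above. */
    struct agg_v8i8 { v8i8 a; };
    struct agg_v8i8 pass_agg_v8i8(struct agg_v8i8 arg) { return arg; }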
|
/external/llvm-project/llvm/unittests/FuzzMutate/ |
D | OperationsTest.cpp |
     95  Constant *v8i8 = ConstantVector::getSplat(ElementCount::getFixed(8), i8);   in TEST() local
    113  EXPECT_TRUE(AnyType.matches({}, v8i8));   in TEST()
    117  AnyType.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}),   in TEST()
    118  Each(AnyOf(TypesMatch(i32), TypesMatch(f16), TypesMatch(v8i8))));   in TEST()
    127  AnyInt.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}),   in TEST()
    138  AnyFP.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}),   in TEST()
    145  EXPECT_FALSE(AnyPtr.matches({}, v8i8));   in TEST()
    149  AnyPtr.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}),   in TEST()
    153  EXPECT_TRUE(AnyVec.matches({}, v8i8));   in TEST()
    159  EXPECT_THAT(AnyVec.generate({}, {v8i8->getType()}),   in TEST()
    [all …]
|
/external/llvm-project/llvm/test/Transforms/LoopStrengthReduce/ARM/ |
D | ivchain-ARM.ll |
    327  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr, i32 1)
    329  %vld2 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr3, i32 1)
    331  %vld3 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr7, i32 1)
    333  %vld4 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr11, i32 1)
    334  %vld5 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %src.addr, i32 1)
    336  %vld6 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr17, i32 1)
    338  %vld7 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr20, i32 1)
    340  %vld8 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr23, i32 1)
    341  %vadd1 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld1, <8 x i8> %vld2) nounwind
    342  %vadd2 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld2, <8 x i8> %vld3) nounwind
    [all …]
|
/external/llvm/test/Transforms/LoopStrengthReduce/ARM/ |
D | ivchain-ARM.ll |
    331  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr, i32 1)
    333  %vld2 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr3, i32 1)
    335  %vld3 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr7, i32 1)
    337  %vld4 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr11, i32 1)
    338  %vld5 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %src.addr, i32 1)
    340  %vld6 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr17, i32 1)
    342  %vld7 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr20, i32 1)
    344  %vld8 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr23, i32 1)
    345  %vadd1 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld1, <8 x i8> %vld2) nounwind
    346  %vadd2 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld2, <8 x i8> %vld3) nounwind
    [all …]
|
/external/llvm-project/llvm/test/Bitcode/ |
D | arm32_neon_vcnt_upgrade.ll |
     16  %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
     17  ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
     22  declare <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8>) nounwind readnone
|
/external/llvm/test/Bitcode/ |
D | arm32_neon_vcnt_upgrade.ll |
     16  %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
     17  ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
     22  declare <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8>) nounwind readnone
|
/external/llvm-project/llvm/lib/Target/Hexagon/ |
D | HexagonCallingConv.td |
     17  CCIfType<[i64,v2i32,v4i16,v8i8],
     43  CCIfType<[i64,v2i32,v4i16,v8i8],
     45  CCIfType<[i64,v2i32,v4i16,v8i8],
     73  CCIfType<[i64,v2i32,v4i16,v8i8],
     75  CCIfType<[i64,v2i32,v4i16,v8i8],
     99  CCIfType<[i64,v2i32,v4i16,v8i8],
|