/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
D | 2010-05-20-NEONSpillCrash.ll |
    4   ; the @llvm.arm.neon.vld3.v8i8 defined three parts of a register.
    8   declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*, i32) nounwind readonly
    10  declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
    13  …%tmp1b = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A2, i32 1) ; <%struct.__neon…
    16  …%tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4, i32 1) ; <%struct.__neon…
    19  …%tmp1e = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A5, i32 1) ; <%struct.__neon…
    21  …%tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6, i32 1) ; <%struct.__neon…
    23  …%tmp1g = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A7, i32 1) ; <%struct.__neon…
    26  …%tmp1h = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A8, i32 1) ; <%struct.__neon…
    33  …call void @llvm.arm.neon.vst3.v8i8(i8* %A1, <8 x i8> %tmp4abcd, <8 x i8> zeroinitializer, <8 x i8>…
    [all …]
|
D | vqshrn.ll |
    7   …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
    31  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
    55  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    75  declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    79  declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    83  declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    91  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    115 …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    139 …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8,…
    159 declare <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    [all …]
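The declare lines matched above give the full signatures of the saturating narrowing-shift intrinsics. A minimal standalone .ll sketch calling one of them, with an illustrative function name and a shift constant following the pattern in the truncated calls (not taken verbatim from the file):

    declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone

    define <8 x i8> @vqshrns8_sketch(<8 x i16> %a) nounwind {
      ; negative per-lane shift amounts encode the narrowing right shift (vqshrn.s16 #8)
      %r = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %a, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
      ret <8 x i8> %r
    }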
|
D | vpminmax.ll |
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    35  %tmp3 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    66  declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    70  declare <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    81  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    108 %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    139 declare <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    143 declare <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
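The matched declare lines show that the pairwise min/max intrinsics take and return plain <8 x i8> vectors. A minimal hedged sketch of a call, with an illustrative function name:

    declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone

    define <8 x i8> @vpmins8_sketch(<8 x i8> %a, <8 x i8> %b) nounwind {
      ; pairwise signed minimum over the concatenation of the two inputs
      %r = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %a, <8 x i8> %b)
      ret <8 x i8> %r
    }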
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | 2010-05-20-NEONSpillCrash.ll |
    4   ; the @llvm.arm.neon.vld3.v8i8.p0i8 defined three parts of a register.
    8   declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8*, i32) nounwind readonly
    10  declare void @llvm.arm.neon.vst3.p0i8.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
    13  …%tmp1b = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A2, i32 1) ; <%struct._…
    16  …%tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A4, i32 1) ; <%struct._…
    19  …%tmp1e = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A5, i32 1) ; <%struct._…
    21  …%tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A6, i32 1) ; <%struct._…
    23  …%tmp1g = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A7, i32 1) ; <%struct._…
    26  …%tmp1h = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A8, i32 1) ; <%struct._…
    33  …call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* %A1, <8 x i8> %tmp4abcd, <8 x i8> zeroinitializer, <8 …
    [all …]
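The vld3/vst3 signatures appear verbatim in the matched declare lines. A minimal standalone sketch tying them together, assuming %struct.__neon_int8x8x3_t is the usual three-element { <8 x i8>, <8 x i8>, <8 x i8> } aggregate (the struct body is not shown in the matches) and using illustrative names:

    %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> } ; assumed layout, not shown above

    declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8*, i32) nounwind readonly
    declare void @llvm.arm.neon.vst3.p0i8.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind

    define void @vld3_vst3_sketch(i8* %src, i8* %dst) nounwind {
      ; de-interleaving 3-part load, then a re-interleaving store (last operand is alignment)
      %rec = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %src, i32 1)
      %p0 = extractvalue %struct.__neon_int8x8x3_t %rec, 0
      %p1 = extractvalue %struct.__neon_int8x8x3_t %rec, 1
      %p2 = extractvalue %struct.__neon_int8x8x3_t %rec, 2
      call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* %dst, <8 x i8> %p0, <8 x i8> %p1, <8 x i8> %p2, i32 1)
      ret void
    }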
|
D | vqshrn.ll |
    7   …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
    31  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
    55  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    75  declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    79  declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    83  declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    91  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    115 …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    139 …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8,…
    159 declare <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    [all …]
|
D | neon-dot-product.ll |
    3   declare <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
    5   declare <2 x i32> @llvm.arm.neon.sdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
    12  …%vdot1.i = call <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
    28  …%vdot1.i = call <2 x i32> @llvm.arm.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
    47  …%vdot1.i = call <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cas…
    69  …%vdot1.i = call <2 x i32> @llvm.arm.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cas…
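The matched declarations show the accumulator-plus-two-operands shape of the dot-product intrinsics. A minimal hedged sketch of the unsigned form, with illustrative names:

    declare <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)

    define <2 x i32> @udot_sketch(<2 x i32> %acc, <8 x i8> %b, <8 x i8> %c) {
      ; each i32 lane accumulates a dot product of four unsigned i8 lanes from %b and %c
      %r = call <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32> %acc, <8 x i8> %b, <8 x i8> %c)
      ret <2 x i32> %r
    }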
|
D | vpminmax.ll |
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    35  %tmp3 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    66  declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    70  declare <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    81  %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    108 %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    139 declare <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
    143 declare <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
|
/external/llvm/test/CodeGen/ARM/ |
D | 2010-05-20-NEONSpillCrash.ll |
    4   ; the @llvm.arm.neon.vld3.v8i8.p0i8 defined three parts of a register.
    8   declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8*, i32) nounwind readonly
    10  declare void @llvm.arm.neon.vst3.p0i8.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
    13  …%tmp1b = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A2, i32 1) ; <%struct._…
    16  …%tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A4, i32 1) ; <%struct._…
    19  …%tmp1e = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A5, i32 1) ; <%struct._…
    21  …%tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A6, i32 1) ; <%struct._…
    23  …%tmp1g = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A7, i32 1) ; <%struct._…
    26  …%tmp1h = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A8, i32 1) ; <%struct._…
    33  …call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* %A1, <8 x i8> %tmp4abcd, <8 x i8> zeroinitializer, <8 …
    [all …]
|
D | vqshrn.ll |
    7   …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
    31  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i…
    55  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    75  declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    79  declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    83  declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    91  …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    115 …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, …
    139 …%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8,…
    159 declare <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/AArch64/ |
D | vector-reduce.ll |
    4   ; COST-LABEL: add.i8.v8i8
    5   …d cost of 1 for instruction: %r = call i8 @llvm.experimental.vector.reduce.add.i8.v8i8(<8 x i8> %v)
    6   ; CODE-LABEL: add.i8.v8i8
    8   define i8 @add.i8.v8i8(<8 x i8> %v) {
    9   %r = call i8 @llvm.experimental.vector.reduce.add.i8.v8i8(<8 x i8> %v)
    49  ; COST-LABEL: umin.i8.v8i8
    50  …ost of 157 for instruction: %r = call i8 @llvm.experimental.vector.reduce.umin.i8.v8i8(<8 x i8> %v)
    51  ; CODE-LABEL: umin.i8.v8i8
    53  define i8 @umin.i8.v8i8(<8 x i8> %v) {
    54  %r = call i8 @llvm.experimental.vector.reduce.umin.i8.v8i8(<8 x i8> %v)
    [all …]
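The cost-model checks reference the LLVM 7-era experimental reduction intrinsics. A minimal standalone sketch of the add reduction matched above, with the declaration inferred from the call in the file and an illustrative function name:

    declare i8 @llvm.experimental.vector.reduce.add.i8.v8i8(<8 x i8>)

    define i8 @reduce_add_sketch(<8 x i8> %v) {
      ; horizontal add of all eight i8 lanes into a single scalar
      %r = call i8 @llvm.experimental.vector.reduce.add.i8.v8i8(<8 x i8> %v)
      ret i8 %r
    }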
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-tbl.ll |
    6   %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %A, <8 x i8> %B)
    20  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C)
    34  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x …
    48  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x…
    59  declare <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8>, <8 x i8>) nounwind readnone
    61  declare <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
    63  declare <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind r…
    65  declare <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)…
    71  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C)
    85  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i…
    [all …]
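The declare lines spell out the table-lookup intrinsic signatures. A minimal hedged sketch of the single-table form, with illustrative function and argument names:

    declare <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8>, <8 x i8>) nounwind readnone

    define <8 x i8> @tbl1_sketch(<16 x i8> %table, <8 x i8> %indices) {
      ; each result byte is %table[%indices[i]], with out-of-range indices producing 0
      %r = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %table, <8 x i8> %indices)
      ret <8 x i8> %r
    }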
|
D | arm64-copy-tuple.ll |
    16  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    20  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    23  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    33  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    37  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    40  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    50  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    54  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    57  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    67  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    [all …]
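The matched calls imply the ld2/st2 shapes used throughout the file. A minimal hedged sketch, with the declarations reconstructed from those call sites (they are not shown in the matches) and illustrative names:

    declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
    declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)

    define void @ld2_st2_sketch(<8 x i8>* %src, i8* %dst) {
      ; de-interleave two <8 x i8> vectors from %src, then store them back interleaved at %dst
      %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %src)
      %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
      %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
      tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %dst)
      ret void
    }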
|
D | arm64-neon-across.ll |
    19  declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
    35  declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>)
    39  declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)
    55  declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>)
    59  declare i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8>)
    75  declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>)
    79  declare i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8>)
    85  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a)
    102 %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
    169 %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a)
    [all …]
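The across-vector reduction intrinsics are all declared here with the same <8 x i8> to i32 shape. A minimal hedged sketch of one of them, with an illustrative function name:

    declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>)

    define i32 @uaddlv_sketch(<8 x i8> %a) {
      ; widening unsigned horizontal add of all eight lanes
      %r = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
      ret i32 %r
    }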
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | arm64-tbl.ll |
    6   %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %A, <8 x i8> %B)
    20  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C)
    34  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x …
    48  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x…
    59  declare <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8>, <8 x i8>) nounwind readnone
    61  declare <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
    63  declare <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind r…
    65  declare <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)…
    71  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C)
    85  …%tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i…
    [all …]
|
D | arm64-copy-tuple.ll |
    16  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    20  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    23  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    33  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    37  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    40  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    50  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    54  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    57  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
    67  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    [all …]
|
D | arm64-neon-across.ll |
    19  declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
    35  declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>)
    39  declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)
    55  declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>)
    59  declare i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8>)
    75  declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>)
    79  declare i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8>)
    85  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a)
    102 %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
    169 %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a)
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/AArch64/ |
D | AArch64GenDAGISel.inc |
    508   /* 963*/ OPC_CheckChild0Type, MVT::v8i8,
    530   …// Src: (st (vector_extract:{ *:[i32] } VecListOne64:{ *:[v8i8] }:$Vt, (imm:{ *:[i64] })<<P:Predic…
    531   …// Dst: (ST1i8 (SUBREG_TO_REG:{ *:[f128] } 0:{ *:[i32] }, VecListOne64:{ *:[v8i8] }:$Vt, dsub:{ *:…
    1163  /* 2354*/ OPC_CheckChild1Type, MVT::v8i8,
    1173  …// Src: (st FPR64:{ *:[v8i8] }:$Rt, (ro_Windexed64:{ *:[iPTR] } GPR64sp:{ *:[i64] }:$Rn, GPR32:{ *…
    1174  …// Dst: (STRDroW FPR64:{ *:[v8i8] }:$Rt, GPR64sp:{ *:[i64] }:$Rn, GPR32:{ *:[i32] }:$Rm, ro_Wexten…
    1180  …// Src: (st FPR64:{ *:[v8i8] }:$Rt, (ro_Xindexed64:{ *:[iPTR] } GPR64sp:{ *:[i64] }:$Rn, GPR64:{ *…
    1181  …// Dst: (STRDroX FPR64:{ *:[v8i8] }:$Rt, GPR64sp:{ *:[i64] }:$Rn, GPR64:{ *:[i64] }:$Rm, ro_Xexten…
    1198  …// Src: (st FPR64:{ *:[v8i8] }:$Rt, (am_indexed7s32:{ *:[iPTR] } GPR64sp:{ *:[i64] }:$Rn, simm7s4:…
    1199  …*:[i32] } FPR64:{ *:[v8i8] }:$Rt, ssub:{ *:[i32] }), (CPYi32:{ *:[i32] } (SUBREG_TO_REG:{ *:[f128]…
    [all …]
|
/external/llvm/test/Transforms/LoopStrengthReduce/ARM/ |
D | ivchain-ARM.ll |
    331  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr, i32 1)
    333  %vld2 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr3, i32 1)
    335  %vld3 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr7, i32 1)
    337  %vld4 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr11, i32 1)
    338  %vld5 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %src.addr, i32 1)
    340  %vld6 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr17, i32 1)
    342  %vld7 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr20, i32 1)
    344  %vld8 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr23, i32 1)
    345  %vadd1 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld1, <8 x i8> %vld2) nounwind
    346  %vadd2 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld2, <8 x i8> %vld3) nounwind
    [all …]
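The matched loop body chains vld1 loads into vhaddu halving adds. A minimal hedged sketch of one such pair outside the loop, with the declarations reconstructed from the call sites above and illustrative names:

    declare <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8*, i32)
    declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone

    define <8 x i8> @vhaddu_sketch(i8* %p, i8* %q) nounwind {
      ; two 64-bit loads with alignment 1, then an unsigned halving add
      %a = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %p, i32 1)
      %b = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %q, i32 1)
      %sum = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %a, <8 x i8> %b)
      ret <8 x i8> %sum
    }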
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopStrengthReduce/ARM/ |
D | ivchain-ARM.ll |
    328  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr, i32 1)
    330  %vld2 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr3, i32 1)
    332  %vld3 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr7, i32 1)
    334  %vld4 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr11, i32 1)
    335  %vld5 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %src.addr, i32 1)
    337  %vld6 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr17, i32 1)
    339  %vld7 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr20, i32 1)
    341  %vld8 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %add.ptr23, i32 1)
    342  %vadd1 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld1, <8 x i8> %vld2) nounwind
    343  %vadd2 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld2, <8 x i8> %vld3) nounwind
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/unittests/FuzzMutate/ |
D | OperationsTest.cpp |
    96   Constant *v8i8 = ConstantVector::getSplat(8, i8); in TEST() local
    114  EXPECT_TRUE(AnyType.matches({}, v8i8)); in TEST()
    118  AnyType.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}), in TEST()
    119  Each(AnyOf(TypesMatch(i32), TypesMatch(f16), TypesMatch(v8i8)))); in TEST()
    128  AnyInt.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}), in TEST()
    139  AnyFP.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}), in TEST()
    146  EXPECT_FALSE(AnyPtr.matches({}, v8i8)); in TEST()
    150  AnyPtr.generate({}, {i32->getType(), f16->getType(), v8i8->getType()}), in TEST()
    154  EXPECT_TRUE(AnyVec.matches({}, v8i8)); in TEST()
    160  EXPECT_THAT(AnyVec.generate({}, {v8i8->getType()}), in TEST()
    [all …]
|
/external/clang/test/CodeGen/ |
D | systemz-abi-vector.c |
    20   typedef __attribute__((vector_size(8))) char v8i8; typedef
    54   v8i8 pass_v8i8(v8i8 arg) { return arg; } in pass_v8i8()
    148  struct agg_v8i8 { v8i8 a; };
    285  v8i8 va_v8i8(__builtin_va_list l) { return __builtin_va_arg(l, v8i8); } in va_v8i8()
|
/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
D | ARMCallingConv.td |
    31   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    49   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    63   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    75   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    91   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    138  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    148  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    163  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    175  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
|
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/ARM/ |
D | ARMGenDAGISel.inc |
    1214  /* 2574*/ OPC_SwitchType /*2 cases */, 48, MVT::v8i8,// ->2625
    1225  …i32] } DPR:{ *:[v2i32] }:$Vd, (bitconvert:{ *:[v2i32] } (NEONvmovImm:{ *:[v8i8] } (timm:{ *:[i32] …
    1233  …i64] } DPR:{ *:[v1i64] }:$Vd, (bitconvert:{ *:[v1i64] } (NEONvmovImm:{ *:[v8i8] } (timm:{ *:[i32] …
    1268  /* 2692*/ OPC_CheckType, MVT::v8i8,
    1280  …32] }:$Vm, (xor:{ *:[v2i32] } (bitconvert:{ *:[v2i32] } (NEONvmovImm:{ *:[v8i8] } (timm:{ *:[i32] …
    1296  /* 2746*/ OPC_CheckType, MVT::v8i8,
    1308  …i32] } DPR:{ *:[v2i32] }:$Vd, (bitconvert:{ *:[v2i32] } (NEONvmovImm:{ *:[v8i8] } (timm:{ *:[i32] …
    1319  /* 2790*/ OPC_CheckType, MVT::v8i8,
    1332  …:[v2i32] } (xor:{ *:[v2i32] } (bitconvert:{ *:[v2i32] } (NEONvmovImm:{ *:[v8i8] } (timm:{ *:[i32] …
    1349  /* 2846*/ OPC_CheckType, MVT::v8i8,
    [all …]
|
/external/llvm/test/Bitcode/ |
D | arm32_neon_vcnt_upgrade.ll |
    16  %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
    17  ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
    22  declare <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8>) nounwind readnone
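The CHECK line shows the auto-upgrade target: old @llvm.arm.neon.vcnt.v8i8 calls are rewritten to the generic population-count intrinsic. A minimal hedged sketch of the upgraded form, with an illustrative function name:

    declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) nounwind readnone

    define <8 x i8> @ctpop_sketch(<8 x i8> %v) {
      ; per-lane population count, the upgraded replacement for llvm.arm.neon.vcnt.v8i8
      %r = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %v)
      ret <8 x i8> %r
    }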
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Bitcode/ |
D | arm32_neon_vcnt_upgrade.ll |
    16  %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
    17  ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
    22  declare <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8>) nounwind readnone
|