/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/ |
D | arm64-simd-ldst.s |
    223  ld3.8b {v4, v5, v6}, [x19]
    224  ld3.16b {v4, v5, v6}, [x19]
    225  ld3.4h {v4, v5, v6}, [x19]
    226  ld3.8h {v4, v5, v6}, [x19]
    227  ld3.2s {v4, v5, v6}, [x19]
    228  ld3.4s {v4, v5, v6}, [x19]
    229  ld3.2d {v4, v5, v6}, [x19]
    231  ld3.8b {v9, v10, v11}, [x9]
    232  ld3.16b {v14, v15, v16}, [x19]
    233  ld3.4h {v24, v25, v26}, [x29]
    [all …]
|
D | neon-simd-ldst-multi-elem.s |
    402  ld3 { v0.16b, v1.16b, v2.16b }, [x0]
    403  ld3 { v15.8h, v16.8h, v17.8h }, [x15]
    404  ld3 { v31.4s, v0.4s, v1.4s }, [sp]
    405  ld3 { v0.2d, v1.2d, v2.2d }, [x0]
    406  ld3 { v0.8b, v1.8b, v2.8b }, [x0]
    407  ld3 { v15.4h, v16.4h, v17.4h }, [x15]
    408  ld3 { v31.2s, v0.2s, v1.2s }, [sp]
    417  ld3 { v0.16b-v2.16b }, [x0]
    418  ld3 { v15.8h-v17.8h }, [x15]
    419  ld3 { v31.4s-v1.4s }, [sp]
    [all …]
|
D | neon-simd-ldst-one-elem.s |
    105  ld3 { v0.b, v1.b, v2.b }[9], [x0]
    106  ld3 { v15.h, v16.h, v17.h }[7], [x15]
    107  ld3 { v31.s, v0.s, v1.s }[3], [sp]
    108  ld3 { v0.d, v1.d, v2.d }[1], [x0]
    266  ld3 { v0.b, v1.b, v2.b }[9], [x0], #3
    267  ld3 { v15.h, v16.h, v17.h }[7], [x15], #6
    268  ld3 { v31.s, v0.s, v1.s }[3], [sp], x3
    269  ld3 { v0.d, v1.d, v2.d }[1], [x0], x6
|
D | neon-simd-post-ldst-multi-elem.s |
    150  ld3 { v0.16b, v1.16b, v2.16b }, [x0], x1
    151  ld3 { v15.8h, v16.8h, v17.8h }, [x15], x2
    152  ld3 { v31.4s, v0.4s, v1.4s }, [sp], #48
    153  ld3 { v0.2d, v1.2d, v2.2d }, [x0], #48
    154  ld3 { v0.8b, v1.8b, v2.8b }, [x0], x2
    155  ld3 { v15.4h, v16.4h, v17.4h }, [x15], x3
    156  ld3 { v31.2s, v0.2s, v1.2s }, [sp], #24
|
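The assembler tests above only exercise ld3 syntax and encodings. For context, a minimal IR sketch of the kind of input llc lowers to the ld3.16b form checked above; it is not from these files, and the intrinsic declaration is copied verbatim from the arm64-indexed-vector-ldst.ll results further down:

    ; Hypothetical reduced test in the style of arm64-ld1.ll below.
    define { <16 x i8>, <16 x i8>, <16 x i8> } @ld3_16b(i8* %A) {
      ; Expected lowering: ld3.16b { v0, v1, v2 }, [x0]
      %tmp = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
      ret { <16 x i8>, <16 x i8>, <16 x i8> } %tmp
    }

    declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*)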
/external/llvm/test/MC/AArch64/ |
D | arm64-simd-ldst.s |
    223  ld3.8b {v4, v5, v6}, [x19]
    224  ld3.16b {v4, v5, v6}, [x19]
    225  ld3.4h {v4, v5, v6}, [x19]
    226  ld3.8h {v4, v5, v6}, [x19]
    227  ld3.2s {v4, v5, v6}, [x19]
    228  ld3.4s {v4, v5, v6}, [x19]
    229  ld3.2d {v4, v5, v6}, [x19]
    231  ld3.8b {v9, v10, v11}, [x9]
    232  ld3.16b {v14, v15, v16}, [x19]
    233  ld3.4h {v24, v25, v26}, [x29]
    [all …]
|
D | neon-simd-ldst-multi-elem.s |
    402  ld3 { v0.16b, v1.16b, v2.16b }, [x0]
    403  ld3 { v15.8h, v16.8h, v17.8h }, [x15]
    404  ld3 { v31.4s, v0.4s, v1.4s }, [sp]
    405  ld3 { v0.2d, v1.2d, v2.2d }, [x0]
    406  ld3 { v0.8b, v1.8b, v2.8b }, [x0]
    407  ld3 { v15.4h, v16.4h, v17.4h }, [x15]
    408  ld3 { v31.2s, v0.2s, v1.2s }, [sp]
    417  ld3 { v0.16b-v2.16b }, [x0]
    418  ld3 { v15.8h-v17.8h }, [x15]
    419  ld3 { v31.4s-v1.4s }, [sp]
    [all …]
|
D | neon-simd-ldst-one-elem.s |
    105  ld3 { v0.b, v1.b, v2.b }[9], [x0]
    106  ld3 { v15.h, v16.h, v17.h }[7], [x15]
    107  ld3 { v31.s, v0.s, v1.s }[3], [sp]
    108  ld3 { v0.d, v1.d, v2.d }[1], [x0]
    266  ld3 { v0.b, v1.b, v2.b }[9], [x0], #3
    267  ld3 { v15.h, v16.h, v17.h }[7], [x15], #6
    268  ld3 { v31.s, v0.s, v1.s }[3], [sp], x3
    269  ld3 { v0.d, v1.d, v2.d }[1], [x0], x6
|
D | neon-simd-post-ldst-multi-elem.s |
    150  ld3 { v0.16b, v1.16b, v2.16b }, [x0], x1
    151  ld3 { v15.8h, v16.8h, v17.8h }, [x15], x2
    152  ld3 { v31.4s, v0.4s, v1.4s }, [sp], #48
    153  ld3 { v0.2d, v1.2d, v2.2d }, [x0], #48
    154  ld3 { v0.8b, v1.8b, v2.8b }, [x0], x2
    155  ld3 { v15.4h, v16.4h, v17.4h }, [x15], x3
    156  ld3 { v31.2s, v0.2s, v1.2s }, [sp], #24
|
/external/capstone/suite/MC/AArch64/ |
D | neon-simd-ldst-multi-elem.s.cs |
    170  0x00,0x40,0x40,0x4c = ld3 {v0.16b, v1.16b, v2.16b}, [x0]
    171  0xef,0x45,0x40,0x4c = ld3 {v15.8h, v16.8h, v17.8h}, [x15]
    172  0xff,0x4b,0x40,0x4c = ld3 {v31.4s, v0.4s, v1.4s}, [sp]
    173  0x00,0x4c,0x40,0x4c = ld3 {v0.2d, v1.2d, v2.2d}, [x0]
    174  0x00,0x40,0x40,0x0c = ld3 {v0.8b, v1.8b, v2.8b}, [x0]
    175  0xef,0x45,0x40,0x0c = ld3 {v15.4h, v16.4h, v17.4h}, [x15]
    176  0xff,0x4b,0x40,0x0c = ld3 {v31.2s, v0.2s, v1.2s}, [sp]
    177  0x00,0x40,0x40,0x4c = ld3 {v0.16b, v1.16b, v2.16b}, [x0]
    178  0xef,0x45,0x40,0x4c = ld3 {v15.8h, v16.8h, v17.8h}, [x15]
    179  0xff,0x4b,0x40,0x4c = ld3 {v31.4s, v0.4s, v1.4s}, [sp]
    [all …]
|
D | neon-simd-ldst-one-elem.s.cs |
    42  0x00,0x24,0x40,0x4d = ld3 {v0.b, v1.b, v2.b}[9], [x0]
    43  0xef,0x79,0x40,0x4d = ld3 {v15.h, v16.h, v17.h}[7], [x15]
    44  0xff,0xb3,0x40,0x4d = ld3 {v31.s, v0.s, v1.s}[3], [sp]
    45  0x00,0xa4,0x40,0x4d = ld3 {v0.d, v1.d, v2.d}[1], [x0]
    106  0x00,0x24,0xdf,0x4d = ld3 {v0.b, v1.b, v2.b}[9], [x0], #3
    107  0xef,0x79,0xdf,0x4d = ld3 {v15.h, v16.h, v17.h}[7], [x15], #6
    108  0xff,0xb3,0xc3,0x4d = ld3 {v31.s, v0.s, v1.s}[3], [sp], x3
    109  0x00,0xa4,0xc6,0x4d = ld3 {v0.d, v1.d, v2.d}[1], [x0], x6
|
D | neon-simd-post-ldst-multi-elem.s.cs |
    41  0x00,0x40,0xc1,0x4c = ld3 {v0.16b, v1.16b, v2.16b}, [x0], x1
    42  0xef,0x45,0xc2,0x4c = ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
    43  0xff,0x4b,0xdf,0x4c = ld3 {v31.4s, v0.4s, v1.4s}, [sp], #48
    44  0x00,0x4c,0xdf,0x4c = ld3 {v0.2d, v1.2d, v2.2d}, [x0], #48
    45  0x00,0x40,0xc2,0x0c = ld3 {v0.8b, v1.8b, v2.8b}, [x0], x2
    46  0xef,0x45,0xc3,0x0c = ld3 {v15.4h, v16.4h, v17.4h}, [x15], x3
    47  0xff,0x4b,0xdf,0x0c = ld3 {v31.2s, v0.2s, v1.2s}, [sp], #24
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | promote-alloca-array-allocation.ll |
    22  %ld3 = load i32, i32* %arrayidx12
    24  store i32 %ld3, i32 addrspace(1)* %arrayidx13
    44  %ld3 = load i32, i32* %arrayidx12
    46  store i32 %ld3, i32 addrspace(1)* %arrayidx13
|
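Note that %ld3 in these AMDGPU matches is just the IR name of a scalar load and is unrelated to the AArch64 ld3 instruction above. A reduced sketch, with a hypothetical kernel name, of the private-array pattern the promote-alloca test exercises:

    define amdgpu_kernel void @copy_elt3(i32 addrspace(1)* %out, i32 %v) {
      ; Private array whose accesses promote-alloca may rewrite
      ; to vector operations instead of scratch memory.
      %alloca = alloca [4 x i32]
      %arrayidx12 = getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 0, i32 3
      store i32 %v, i32* %arrayidx12
      %ld3 = load i32, i32* %arrayidx12
      %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 3
      store i32 %ld3, i32 addrspace(1)* %arrayidx13
      ret void
    }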
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | promote-alloca-array-allocation.ll |
    22  %ld3 = load i32, i32* %arrayidx12
    24  store i32 %ld3, i32 addrspace(1)* %arrayidx13
    44  %ld3 = load i32, i32* %arrayidx12
    46  store i32 %ld3, i32 addrspace(1)* %arrayidx13
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | arm64-indexed-vector-ldst.ll |
    869  ;CHECK: ld3.16b { v0, v1, v2 }, [x0], #48
    870  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
    873  ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
    878  ;CHECK: ld3.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
    879  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
    882  ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
    885  declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*)
    890  ;CHECK: ld3.8b { v0, v1, v2 }, [x0], #24
    891  %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
    894  ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
    [all …]
|
D | arm64-neon-vector-list-spill.ll |
    29  ; CHECK: ld3 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
    33  …%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
    89  ; CHECK: ld3 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
    93  …%vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(flo…
    128  declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
    131  declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
|
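A reduced sketch of the spill scenario the vector-list-spill test creates (the @foo callee and function name are hypothetical; the intrinsic declaration is copied from the matches above): the three-register ld3 result stays live across a call, forcing the backend to spill and reload the vector list that the CHECK lines match.

    declare void @foo()
    declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)

    define <4 x i16> @spill_ld3(i16* %arg) {
      ; The ld3 result triple is live across the call to @foo,
      ; so the register triple must be spilled and reloaded.
      %vld = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg)
      call void @foo()
      %res = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld, 1
      ret <4 x i16> %res
    }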
D | arm64-ld1.ll |
    20  ; CHECK: ld3.8b { v0, v1, v2 }, [x0]
    22  %tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
    36  declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8*) nounwind readonly
    55  ; CHECK: ld3.16b { v0, v1, v2 }, [x0]
    57  %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
    71  declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*) nounwind readonly
    90  ; CHECK: ld3.4h { v0, v1, v2 }, [x0]
    92  %tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
    106  declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*) nounwind readonly
    125  ; CHECK: ld3.8h { v0, v1, v2 }, [x0]
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-indexed-vector-ldst.ll |
    869  ;CHECK: ld3.16b { v0, v1, v2 }, [x0], #48
    870  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
    873  ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
    878  ;CHECK: ld3.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
    879  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
    882  ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
    885  declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*)
    890  ;CHECK: ld3.8b { v0, v1, v2 }, [x0], #24
    891  %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
    894  ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
    [all …]
|
D | arm64-neon-vector-list-spill.ll |
    29  ; CHECK: ld3 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
    33  …%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
    89  ; CHECK: ld3 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
    93  …%vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(flo…
    128  declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
    131  declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
|
D | arm64-ld1.ll |
    20  ; CHECK: ld3.8b { v0, v1, v2 }, [x0]
    22  %tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
    36  declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8*) nounwind readonly
    55  ; CHECK: ld3.16b { v0, v1, v2 }, [x0]
    57  %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
    71  declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*) nounwind readonly
    90  ; CHECK: ld3.4h { v0, v1, v2 }, [x0]
    92  %tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
    106  declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*) nounwind readonly
    125  ; CHECK: ld3.8h { v0, v1, v2 }, [x0]
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bswap.ll |
    69  %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
    73  %bswap3 = call i64 @llvm.bswap.i64(i64 %ld3)
    91  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
    95  %bswap3 = call i32 @llvm.bswap.i32(i32 %ld3)
    122  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
    130  %bswap3 = call i32 @llvm.bswap.i32(i32 %ld3)
    156  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    164  %bswap3 = call i16 @llvm.bswap.i16(i16 %ld3)
    199  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
    215  %bswap3 = call i16 @llvm.bswap.i16(i16 %ld3)
|
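A reduced sketch of the scalar pattern these SLP tests start from, modeled on the bswap.ll matches above (@dst64 and the function name are assumptions): four adjacent loads and four llvm.bswap.i64 calls that the vectorizer can collapse into one <4 x i64> load, one vector bswap, and one vector store.

    @src64 = common global [4 x i64] zeroinitializer, align 32
    @dst64 = common global [4 x i64] zeroinitializer, align 32

    declare i64 @llvm.bswap.i64(i64)

    define void @bswap_4i64() {
      ; Four consecutive element loads ...
      %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
      %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
      %ld2 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2), align 4
      %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
      ; ... each byte-swapped ...
      %bswap0 = call i64 @llvm.bswap.i64(i64 %ld0)
      %bswap1 = call i64 @llvm.bswap.i64(i64 %ld1)
      %bswap2 = call i64 @llvm.bswap.i64(i64 %ld2)
      %bswap3 = call i64 @llvm.bswap.i64(i64 %ld3)
      ; ... and stored back; SLP can rewrite this as a <4 x i64> load,
      ; one @llvm.bswap.v4i64 call, and a single vector store.
      store i64 %bswap0, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 0), align 4
      store i64 %bswap1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 1), align 4
      store i64 %bswap2, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 2), align 4
      store i64 %bswap3, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 3), align 4
      ret void
    }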
D | bitreverse.ll |
    65  %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
    69  %bitreverse3 = call i64 @llvm.bitreverse.i64(i64 %ld3)
    87  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
    91  %bitreverse3 = call i32 @llvm.bitreverse.i32(i32 %ld3)
    124  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
    132  %bitreverse3 = call i32 @llvm.bitreverse.i32(i32 %ld3)
    158  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    166  %bitreverse3 = call i16 @llvm.bitreverse.i16(i16 %ld3)
    207  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
    223  %bitreverse3 = call i16 @llvm.bitreverse.i16(i16 %ld3)
    [all …]
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bswap.ll |
    69  %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
    73  %bswap3 = call i64 @llvm.bswap.i64(i64 %ld3)
    91  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
    95  %bswap3 = call i32 @llvm.bswap.i32(i32 %ld3)
    122  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
    130  %bswap3 = call i32 @llvm.bswap.i32(i32 %ld3)
    156  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    164  %bswap3 = call i16 @llvm.bswap.i16(i16 %ld3)
    199  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
    215  %bswap3 = call i16 @llvm.bswap.i16(i16 %ld3)
|
D | ctpop.ll |
    57  %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
    61  %ctpop3 = call i64 @llvm.ctpop.i64(i64 %ld3)
    79  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
    83  %ctpop3 = call i32 @llvm.ctpop.i32(i32 %ld3)
    110  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
    118  %ctpop3 = call i32 @llvm.ctpop.i32(i32 %ld3)
    144  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    152  %ctpop3 = call i16 @llvm.ctpop.i16(i16 %ld3)
    187  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
    203  %ctpop3 = call i16 @llvm.ctpop.i16(i16 %ld3)
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/EarlyCSE/AArch64/ |
D | intrinsics.ll |
    148  ; Check that @llvm.aarch64.neon.ld3 is not optimized away by Early CSE due
    149  ; to mismatch between st2 and ld3.
    151  ; CHECK: call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8
    170  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
    207  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
    228  declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8*)
|
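A reduced sketch of the mismatch described in the comment above (value and function names are hypothetical; the ld3 declaration is copied from the matches, while the st2 declaration follows the standard AArch64 NEON intrinsic form and should be treated as an assumption): st2 stores two interleaved vectors, ld3 would read three, so EarlyCSE cannot forward the stored values and must keep the load.

    declare void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32>, <4 x i32>, i8*)
    declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8*)

    define <4 x i32> @st2_then_ld3(<4 x i32> %a, <4 x i32> %b, i8* %p) {
      ; st2 interleaves two vectors into memory ...
      call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %a, <4 x i32> %b, i8* %p)
      ; ... but ld3 de-interleaves three, so this load must stay.
      %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %p)
      %res = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 0
      ret <4 x i32> %res
    }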
/external/llvm/test/Transforms/EarlyCSE/AArch64/ |
D | intrinsics.ll |
    146  ; Check that @llvm.aarch64.neon.ld3 is not optimized away by Early CSE due
    147  ; to mismatch between st2 and ld3.
    149  ; CHECK: call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8
    168  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
    205  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
    226  declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8*)
|