/external/llvm-project/llvm/test/CodeGen/VE/VELIntrinsics/

vst.ll
    14  ; CHECK-NEXT: vld %v0, %s1, %s0
    17  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
    23  declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    34  ; CHECK-NEXT: vld %v0, %s1, %s0
    37  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
    51  ; CHECK-NEXT: vld %v0, 8, %s0
    54  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    65  ; CHECK-NEXT: vld %v0, 8, %s0
    68  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    79  ; CHECK-NEXT: vld %v0, %s1, %s0
    [all …]
lvlgen.ll
     6  declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    18  ; CHECK-NEXT: vld %v0, 8, %s2
    24  ; CHECK-NEXT: vld %v0, 16, %s2
    29  ; CHECK-NEXT: vld %v0, 8, %s2
    33  %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 256)
    35  %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, i8* %P, i32 128)
    37  %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 128)
    51  ; CHECK-NEXT: vld %v0, 8, %s1
    53  ; CHECK-NEXT: vld %v0, 16, %s1
    55  ; CHECK-NEXT: vld %v0, 8, %s1
    [all …]
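The lvlgen.ll matches exercise how the VE backend manages the vector length (VL) register: back-to-back loads that pass different lengths (i32 256, then i32 128) to the same intrinsic force a fresh VL setup between the two vld instructions, which is what this test checks (the lvl setup lines themselves do not contain "vld", so they do not appear in the matches). A minimal sketch of that shape, pieced together from the fragments above; the function name and the final fadd are illustrative additions that just keep both results live, not lines from the test file:

    declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)

    define <256 x double> @switch_vector_length(i8* %P) {
      ; vector length 256 for the first strided load
      %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 256)
      ; vector length drops to 128, so VL must be re-set before this vld
      %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, i8* %P, i32 128)
      ; sum only keeps both loads live in this sketch
      %sum = fadd fast <256 x double> %l0, %l1
      ret <256 x double> %sum
    }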
lsv.ll
    14  ; CHECK-NEXT: vld %v0, 8, %s0
    19  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    26  declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    40  ; CHECK-NEXT: vld %v0, 8, %s0
    44  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    58  ; CHECK-NEXT: vld %v0, 8, %s0
    62  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    76  ; CHECK-NEXT: vld %v0, 8, %s0
    80  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
vbrd.ll
    33  ; CHECK-NEXT: vld %v0, 8, %s1
    37  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
    44  declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    58  ; CHECK-NEXT: vld %v0, 8, %s1
    70  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
   109  ; CHECK-NEXT: vld %v0, 8, %s1
   113  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
   128  ; CHECK-NEXT: vld %v0, 8, %s1
   133  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
   165  ; CHECK-NEXT: vld %v0, 8, %s1
    [all …]
vmv.ll
    14  ; CHECK-NEXT: vld %v0, 8, %s0
    18  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    25  declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    39  ; CHECK-NEXT: vld %v0, 8, %s0
    46  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    61  ; CHECK-NEXT: vld %v0, 8, %s0
    68  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
vld.ll
    14  ; CHECK-NEXT: vld %v0, %s1, %s0
    19  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
    25  declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    33  ; CHECK-NEXT: vld %v0, %s1, %s2
    34  ; CHECK-NEXT: vld %v0, %s1, %s0
    39  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
    40  …%5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 25…
    46  declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)
    54  ; CHECK-NEXT: vld %v0, 8, %s0
    59  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
    [all …]
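Taken together, the VELIntrinsics matches show the two flavors of the VE load intrinsic: @llvm.ve.vl.vld.vssl takes a stride, a base pointer, and an explicit vector length, while @llvm.ve.vl.vld.vssvl additionally threads an existing <256 x double> value through (the result of the preceding load in lines 39-40 of vld.ll above). A self-contained sketch assembled from those fragments; the function name is illustrative and the RUN/CHECK machinery of the real tests is omitted:

    declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)

    define <256 x double> @vld_then_vld(i8* %p0, i8* %p1, i64 %stride) {
      ; plain strided load of 256 elements
      %v1 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %stride, i8* %p0, i32 256)
      ; second load chained on the first result via the vssvl form
      %v2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %stride, i8* %p1, <256 x double> %v1, i32 256)
      ret <256 x double> %v2
    }

Each call lowers to a single vld instruction, which is what the CHECK-NEXT lines in these files match.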
/external/llvm-project/llvm/test/CodeGen/VE/Scalar/

inlineasm-vldvst.ll
     3  define void @vld(i8* %p, i64 %i) nounwind {
     4  ; CHECK-LABEL: vld:
    13  ; CHECK-NEXT: vld %v0, %s1, %s0
    18  tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
    32  ; CHECK-NEXT: vld %v0, %s1, %s0
    40  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
    55  ; CHECK-NEXT: vld %v0, %s1, %s0
    58  ; CHECK-NEXT: vld %v1, %s1, %s0
    69  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
    70  %2 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
inlineasm-vldvst-reg.ll
     8  ;; t26: ch,glue = inlineasm t25, TargetExternalSymbol:i64'vld $0, $2, $1', MDNode:ch<null>, Targe…
    15  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
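The two Scalar tests reach the same vld instruction through inline assembly instead of an intrinsic: "=v" constrains the result to a vector register and the two "r" operands supply the base pointer and the i64 stride (the template "vld $0, $2, $1" puts the i64 operand in the middle, matching the vld %vx, sy, sz operand order visible in the CHECK lines). A trimmed-down sketch of the pattern in those matches; the function name is illustrative:

    define void @vld_via_asm(i8* %p, i64 %i) nounwind {
      ; result in a vector register ("=v"); pointer and stride in scalar registers ("r")
      %v = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
      ret void
    }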
/external/llvm/test/CodeGen/AArch64/

arm64-neon-vector-list-spill.ll
    13  %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %arg1)
    22  %vld.extract = extractvalue { <2 x i32>, <2 x i32> } %vld, 0
    23  %res = extractelement <2 x i32> %vld.extract, i32 1
    33  …%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
    42  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
    43  %res = extractelement <4 x i16> %vld.extract, i32 1
    53  …%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16…
    62  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
    63  %res = extractelement <4 x i16> %vld.extract, i32 0
    73  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %arg1)
    [all …]
arm64-neon-copyPhysReg-tuple.ll
    10  …%vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> <i32 -1…
    11  %extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
    24  …%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i…
    25  %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
    39  …%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p…
    40  %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
/external/llvm-project/llvm/test/CodeGen/AArch64/

arm64-neon-vector-list-spill.ll
    13  %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %arg1)
    22  %vld.extract = extractvalue { <2 x i32>, <2 x i32> } %vld, 0
    23  %res = extractelement <2 x i32> %vld.extract, i32 1
    33  …%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
    42  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
    43  %res = extractelement <4 x i16> %vld.extract, i32 1
    53  …%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16…
    62  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
    63  %res = extractelement <4 x i16> %vld.extract, i32 0
    73  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %arg1)
    [all …]
arm64-neon-copyPhysReg-tuple.ll
    10  …%vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> <i32 -1…
    11  %extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
    24  …%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i…
    25  %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
    39  …%vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p…
    40  %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
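Both AArch64 trees carry the same pair of tests: arm64-neon-vector-list-spill.ll loads whole register tuples with the ld2/ld3/ld4 intrinsics and then pulls out a single lane, while arm64-neon-copyPhysReg-tuple.ll uses the ld2lane/ld3lane/ld4lane forms, which also take the existing tuple values and a lane index. A compact sketch of the ld2 shape from the fragments above; the declaration is implied by the calls and the function name is illustrative:

    declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)

    define i32 @ld2_then_extract_lane(i32* %arg1) {
      ; load two interleaved <2 x i32> vectors as one register tuple
      %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %arg1)
      ; take the first vector of the tuple, then lane 1 of that vector
      %vld.extract = extractvalue { <2 x i32>, <2 x i32> } %vld, 0
      %res = extractelement <2 x i32> %vld.extract, i32 1
      ret i32 %res
    }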
/external/jackson-databind/src/main/java/com/fasterxml/jackson/databind/

DatabindContext.java
   221  … PolymorphicTypeValidator.Validity vld = ptv.validateSubClassName(config, baseType, subClass);  [in resolveAndValidateSubType(), local]
   222  if (vld == Validity.DENIED) {  [in resolveAndValidateSubType()]
   241  if (vld == Validity.INDETERMINATE) {  [in resolveAndValidateSubType()]
   242  vld = ptv.validateSubType(config, baseType, subType);  [in resolveAndValidateSubType()]
   243  if (vld != Validity.ALLOWED) {  [in resolveAndValidateSubType()]
   258  …PolymorphicTypeValidator.Validity vld = ptv.validateSubClassName(config, baseType, subClass.substr…  [in _resolveAndValidateGeneric(), local]
   259  if (vld == Validity.DENIED) {  [in _resolveAndValidateGeneric()]
   267  if (vld != Validity.ALLOWED) {  [in _resolveAndValidateGeneric()]
/external/llvm-project/llvm/test/MC/VE/

VLD.s
     6  # CHECK-INST: vld %v11, 23, %s12
     8  vld %v11, 23, %s12  [label]
    10  # CHECK-INST: vld.nc %vix, 63, %s22
    12  vld.nc %vix, 63, %s22
/external/XNNPACK/src/f32-ibilinear-chw/gen/

wasmsimd-p4.c
    70  const v128_t vld = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4(), local]
    76  const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4()]
   104  const v128_t vld = wasm_v32x4_shuffle(vldrd, vldrd, 0, 2, 0, 2);  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4(), local]
   110  const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4()]
neonfma-p4.c
    70  const float32x4_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p4(), local]
    77  const float32x4_t vl = vfmaq_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p4()]
   110  const float32x2_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p4(), local]
   117  const float32x2_t vl = vfma_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p4()]
neon-p4.c
    70  const float32x4_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neon_p4(), local]
    77  const float32x4_t vl = vmlaq_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neon_p4()]
   110  const float32x2_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neon_p4(), local]
   117  const float32x2_t vl = vmla_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neon_p4()]
wasmsimd-p8.c
   156  const v128_t vld = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8(), local]
   162  const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8()]
   190  const v128_t vld = wasm_v32x4_shuffle(vldrd, vldrd, 0, 2, 0, 2);  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8(), local]
   196  const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));  [in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8()]
neonfma-p8.c
   158  const float32x4_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p8(), local]
   165  const float32x4_t vl = vfmaq_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p8()]
   198  const float32x2_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p8(), local]
   205  const float32x2_t vl = vfma_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neonfma_p8()]
neon-p8.c
   158  const float32x4_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neon_p8(), local]
   165  const float32x4_t vl = vmlaq_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neon_p8()]
   198  const float32x2_t vld = vld_t.val[0];  [in xnn_f32_ibilinear_chw_ukernel__neon_p8(), local]
   205  const float32x2_t vl = vmla_f32(vtl, vld, valphav);  [in xnn_f32_ibilinear_chw_ukernel__neon_p8()]
/external/XNNPACK/src/f32-ibilinear-chw/

wasmsimd.c.in
    63  …const v128_t vld${ABC[P:P+4]} = wasm_v32x4_shuffle(vldrd${ABC[P:P+2]}, vldrd${ABC[P+2:P+4]}, 0, 2,…
    71  …const v128_t vl${ABC[P:P+4]} = wasm_f32x4_add(vtl${ABC[P:P+4]}, wasm_f32x4_mul(vld${ABC[P:P+4]}, v…
   111  const v128_t vld = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);  [variable]
   117  const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
   144  const v128_t vld = wasm_v32x4_shuffle(vldrd, vldrd, 0, 2, 0, 2);  [variable]
   150  const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
neon.c.in
    63  const float32x4_t vld${ABC[P:P+4]} = vld_t${ABC[P:P+4]}.val[0];
    72  …const float32x4_t vl${ABC[P:P+4]} = ${VMULADDQ_F32}(vtl${ABC[P:P+4]}, vld${ABC[P:P+4]}, valphav${A…
   110  const float32x4_t vld = vld_t.val[0];
   117  const float32x4_t vl = ${VMULADDQ_F32}(vtl, vld, valphav);
   148  const float32x2_t vld = vld_t.val[0];
   155  const float32x2_t vl = ${VMULADD_F32}(vtl, vld, valphav);
/external/arm-neon-tests/

ref_vtbX.c
    56  vld##X##_##T2##W((T1##W##_t *)lookup_table); \  [in exec_vtbX()]
   176  vld##X##_##T2##W((T1##W##_t *)lookup_table); \  [in exec_vtbX()]
ref_vldX_lane.c
    53  vld##X##Q##_##T2##W(VECT_VAR(buffer_src, T1, W, N)); \  [in exec_vldX_lane()]
    57  vld##X##Q##_lane_##T2##W(VECT_VAR(buffer_vld##X##_lane, T1, W, X), \  [in exec_vldX_lane()]
ref_vstX_lane.c
    55  vld##X##Q##_##T2##W(VECT_VAR(buffer_src, T1, W, N)); \  [in exec_vstX_lane()]
    59  vld##X##Q##_lane_##T2##W(VECT_VAR(buffer_vld##X##_lane, T1, W, X), \  [in exec_vstX_lane()]