Searched refs:vt3 (Results 1 – 12 of 12) sorted by relevance

/third_party/ffmpeg/libavcodec/mips/
vp8_idct_msa.c
     51  v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;  [local in ff_vp8_idct_add_msa()]
     64  VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);  [in ff_vp8_idct_add_msa()]
     65  SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);  [in ff_vp8_idct_add_msa()]
     67  TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);  [in ff_vp8_idct_add_msa()]
     73  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  [in ff_vp8_idct_add_msa()]
    109  v4i32 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;  [local in ff_vp8_luma_dc_wht_msa()]
    120  BUTTERFLY_4(a1, d1, c1, b1, vt0, vt1, vt3, vt2);  [in ff_vp8_luma_dc_wht_msa()]
    121  ADD4(vt0, 3, vt1, 3, vt2, 3, vt3, 3, vt0, vt1, vt2, vt3);  [in ff_vp8_luma_dc_wht_msa()]
    122  SRA_4V(vt0, vt1, vt2, vt3, 3);  [in ff_vp8_luma_dc_wht_msa()]
    126  mb_dq_coeff[48] = __msa_copy_s_h((v8i16) vt3, 0);  [in ff_vp8_luma_dc_wht_msa()]
    [all …]
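The hits at lines 64-65 and 121-122 show the two rounding conventions in these VP8 inverse transforms: the 4x4 IDCT result rows vt0..vt3 get a rounding arithmetic shift right by 3 (SRARI), while the luma DC WHT path adds 3 before shifting. A minimal scalar sketch of those two steps, assuming the usual MSA SRARI semantics (add 1 << (shift - 1), then shift); this is an illustration, not the MSA code itself:

```cpp
#include <cstdint>

// SRARI_W4_SW(vt0, vt1, vt2, vt3, 3) applies this lane-wise to four v4i32 vectors.
static inline int32_t RoundingShiftRight3(int32_t x) {
  return (x + 4) >> 3;  // add 1 << (3 - 1), then arithmetic shift right by 3
}

// ADD4(vt0, 3, ...) followed by SRA_4V(..., 3) in the WHT path is equivalent to:
static inline int32_t WhtShiftRight3(int32_t x) {
  return (x + 3) >> 3;
}
```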
/third_party/node/deps/v8/src/codegen/arm64/
macro-assembler-arm64.h
   1199  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,  [argument of St1()]
   1202    st1(vt, vt2, vt3, dst);  [in St1()]
   1204  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,  [argument of St1()]
   1207    st1(vt, vt2, vt3, vt4, dst);  [in St1()]
   1640  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,  [argument of Ld1()]
   1643    ld1(vt, vt2, vt3, src);  [in Ld1()]
   1645  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,  [argument of Ld1()]
   1648    ld1(vt, vt2, vt3, vt4, src);  [in Ld1()]
   1671  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,  [argument of Ld3()]
   1674    ld3(vt, vt2, vt3, src);  [in Ld3()]
   [all …]
assembler-arm64.cc
   2355  const VRegister& vt3, const MemOperand& src) {  [argument of ld1()]
   2357    USE(vt3);  [in ld1()]
   2358    DCHECK(AreSameFormat(vt, vt2, vt3));  [in ld1()]
   2359    DCHECK(AreConsecutive(vt, vt2, vt3));  [in ld1()]
   2364  const VRegister& vt3, const VRegister& vt4,  [argument of ld1()]
   2367    USE(vt3);  [in ld1()]
   2369    DCHECK(AreSameFormat(vt, vt2, vt3, vt4));  [in ld1()]
   2370    DCHECK(AreConsecutive(vt, vt2, vt3, vt4));  [in ld1()]
   2399  const VRegister& vt3, const MemOperand& src) {  [argument of ld3()]
   2401    USE(vt3);  [in ld3()]
   [all …]
assembler-arm64.h
   1230  void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1234  void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1248  void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1252  void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1256  void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1260  void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1844  void ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1848  void ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1868  void ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   1872  void ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
   [all …]
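The v8 macro-assembler wrappers above forward straight to the raw assembler, which DCHECKs that the listed registers share a vector format and have consecutive codes. A hedged sketch of what a call site could look like inside v8's arm64 code generation; the function name is hypothetical and the snippet assumes v8's internal headers and register constants:

```cpp
// Hypothetical call site (sketch only), assuming v8's internal arm64
// MacroAssembler API excerpted above. v4, v5, v6 are a consecutive run with a
// common .V4S() format, so the DCHECKs in Assembler::ld1()/st1() are satisfied.
void EmitCopyThreeVectors(MacroAssembler* masm, Register src, Register dst) {
  masm->Ld1(v4.V4S(), v5.V4S(), v6.V4S(), MemOperand(src));  // ld1 {v4.4s-v6.4s}, [src]
  masm->St1(v4.V4S(), v5.V4S(), v6.V4S(), MemOperand(dst));  // st1 {v4.4s-v6.4s}, [dst]
}
```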
/third_party/vixl/benchmarks/aarch64/
bench-utils.cc
    388  VRegister vt3((vt.GetCode() + 2) % kNumberOfVRegisters, kQRegSize);  [local in GenerateNEONSequence()]
    392  VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt3));  [in GenerateNEONSequence()]
    394  __ Ld3(vt.V4S(), vt2.V4S(), vt3.V4S(), MemOperand(scratch));  [in GenerateNEONSequence()]
    395  __ St4(vt.V16B(), vt2.V16B(), vt3.V16B(), vt4.V16B(), MemOperand(scratch));  [in GenerateNEONSequence()]
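The benchmark picks vt2, vt3 and vt4 by stepping vt's register code modulo kNumberOfVRegisters, which keeps the register list consecutive even if it wraps past v31 (AArch64 register lists wrap back to v0). A small sketch of that selection, reusing only the types and constants visible in the hit above; the helper name is an invention for illustration:

```cpp
// Sketch of the register selection in GenerateNEONSequence(): derive further
// Q-sized V registers whose codes follow vt's, wrapping modulo the register
// count so the ld3/st4 register lists stay consecutive.
VRegister NextQRegister(const VRegister& base, unsigned distance) {
  return VRegister((base.GetCode() + distance) % kNumberOfVRegisters, kQRegSize);
}
// e.g. VRegister vt2 = NextQRegister(vt, 1), vt3 = NextQRegister(vt, 2), vt4 = NextQRegister(vt, 3);
```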
/third_party/vixl/src/aarch64/
assembler-aarch64.cc
   2070  const VRegister& vt3,  [argument of ld1()]
   2072    USE(vt2, vt3);  [in ld1()]
   2074    VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));  [in ld1()]
   2075    VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));  [in ld1()]
   2082  const VRegister& vt3,  [argument of ld1()]
   2085    USE(vt2, vt3, vt4);  [in ld1()]
   2087    VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));  [in ld1()]
   2088    VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));  [in ld1()]
   2129  const VRegister& vt3,  [argument of ld3()]
   2131    USE(vt2, vt3);  [in ld3()]
   [all …]
macro-assembler-aarch64.h
   3322  const VRegister& vt3,  [argument of Ld1()]
   3326    ld1(vt, vt2, vt3, src);  [in Ld1()]
   3330  const VRegister& vt3,  [argument of Ld1()]
   3335    ld1(vt, vt2, vt3, vt4, src);  [in Ld1()]
   3367  const VRegister& vt3,  [argument of Ld3()]
   3371    ld3(vt, vt2, vt3, src);  [in Ld3()]
   3375  const VRegister& vt3,  [argument of Ld3()]
   3380    ld3(vt, vt2, vt3, lane, src);  [in Ld3()]
   3384  const VRegister& vt3,  [argument of Ld3r()]
   3388    ld3r(vt, vt2, vt3, src);  [in Ld3r()]
   [all …]
assembler-aarch64.h
   2879  const VRegister& vt3,
   2885  const VRegister& vt3,
   2910  const VRegister& vt3,
   2916  const VRegister& vt3,
   2923  const VRegister& vt3,
   2929  const VRegister& vt3,
   2936  const VRegister& vt3,
   2944  const VRegister& vt3,
   3083  const VRegister& vt3,
   3089  const VRegister& vt3,
   [all …]
/third_party/skia/third_party/externals/libwebp/src/dsp/
enc_msa.c
     47  v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;  [local in ITransformOne()]
     57  IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);  [in ITransformOne()]
     58  SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);  [in ITransformOne()]
     59  TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);  [in ITransformOne()]
     65  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  [in ITransformOne()]
dec_msa.c
     45  v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;  [local in TransformOne()]
     55  IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);  [in TransformOne()]
     56  SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);  [in TransformOne()]
     57  TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);  [in TransformOne()]
     63  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  [in TransformOne()]
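In both libwebp files, vt0..vt3 hold the four result rows of the 1-D pass; they are rounded with SRARI by 3, transposed in place with TRANSPOSE4x4_SW_SW, and then added to the reconstruction values res0..res3. A scalar sketch of the transpose step only, as an illustration rather than the MSA macro:

```cpp
#include <algorithm>
#include <cstdint>

// Treat vt0..vt3 as the rows of a 4x4 matrix of 32-bit values and swap rows
// with columns in place, which is what TRANSPOSE4x4_SW_SW does on the vectors.
void Transpose4x4(int32_t m[4][4]) {
  for (int r = 0; r < 4; ++r) {
    for (int c = r + 1; c < 4; ++c) {
      std::swap(m[r][c], m[c][r]);
    }
  }
}
```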
/third_party/vixl/doc/aarch64/
supported-instructions-aarch64.md
   4375  const VRegister& vt3,
   4393  const VRegister& vt3,
   4441  const VRegister& vt3,
   4452  const VRegister& vt3,
   4462  const VRegister& vt3,
   4472  const VRegister& vt3,
   4484  const VRegister& vt3,
   4495  const VRegister& vt3,
   5511  const VRegister& vt3,
   5529  const VRegister& vt3,
   [all …]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
SVEInstrFormats.td
    343  ValueType vt2, ValueType vt3, Instruction inst>
    344  : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
    348  ValueType vt2, ValueType vt3, ValueType vt4,
    350  : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3, vt4:$Op4)),
    359  ValueType vt2, ValueType vt3, Operand ImmTy,
    361  : Pat<(vtd (op vt1:$Op1, vt2:$Op2, (vt3 ImmTy:$Op3))),
    365  ValueType vt2, ValueType vt3, ValueType vt4,
    367  : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3, (vt4 ImmTy:$Op4))),
   2095  ValueType vt2, ValueType vt3, ElementSizeEnum Sz> {
   2098  def : SVE_3_Op_Pat<vt1, op, vt1, vt2, vt3, !cast<Instruction>(NAME)>;