
Searched refs:vt2 (Results 1 – 13 of 13) sorted by relevance

/third_party/ffmpeg/libavcodec/mips/
vp8_idct_msa.c
51 v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; in ff_vp8_idct_add_msa() local
64 VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3); in ff_vp8_idct_add_msa()
65 SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); in ff_vp8_idct_add_msa()
67 TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); in ff_vp8_idct_add_msa()
73 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in ff_vp8_idct_add_msa()
109 v4i32 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; in ff_vp8_luma_dc_wht_msa() local
120 BUTTERFLY_4(a1, d1, c1, b1, vt0, vt1, vt3, vt2); in ff_vp8_luma_dc_wht_msa()
121 ADD4(vt0, 3, vt1, 3, vt2, 3, vt3, 3, vt0, vt1, vt2, vt3); in ff_vp8_luma_dc_wht_msa()
122 SRA_4V(vt0, vt1, vt2, vt3, 3); in ff_vp8_luma_dc_wht_msa()
125 mb_dq_coeff[32] = __msa_copy_s_h((v8i16) vt2, 0); in ff_vp8_luma_dc_wht_msa()
[all …]
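In this file vt0–vt3 hold the output of the second 1D pass of the VP8 4x4 inverse transform: VP8_IDCT_1D_W computes the butterfly, SRARI_W4_SW applies the rounding shift right by 3, TRANSPOSE4x4_SW_SW swaps rows and columns between passes, and ADD4 adds the residual to the prediction. A minimal scalar sketch of the same two-pass structure, using the standard VP8 fixed-point constants (function names here are illustrative, not ffmpeg's):

    #include <algorithm>
    #include <cstdint>

    static const int kC1 = 20091;  // cos(pi/8)*sqrt(2) - 1, Q16
    static const int kC2 = 35468;  // sin(pi/8)*sqrt(2), Q16

    // One 4-point inverse butterfly; `stride` selects columns (4) or rows (1).
    static void Idct1d(const int* in, int* out, int stride) {
      const int a = in[0] + in[2 * stride];
      const int b = in[0] - in[2 * stride];
      const int c = ((in[stride] * kC2) >> 16) -
                    (in[3 * stride] + ((in[3 * stride] * kC1) >> 16));
      const int d = (in[stride] + ((in[stride] * kC1) >> 16)) +
                    ((in[3 * stride] * kC2) >> 16);
      out[0] = a + d;
      out[3 * stride] = a - d;
      out[stride] = b + c;
      out[2 * stride] = b - c;
    }

    // Two passes, rounding shift by 3, then reconstruction with clamping.
    void IdctAdd(const int16_t coeffs[16], uint8_t* dst, int dst_stride) {
      int in[16], hz[16], vt[16];
      for (int i = 0; i < 16; ++i) in[i] = coeffs[i];
      for (int c = 0; c < 4; ++c) Idct1d(&in[c], &hz[c], 4);          // vertical pass
      for (int r = 0; r < 4; ++r) Idct1d(&hz[4 * r], &vt[4 * r], 1);  // horizontal pass
      for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c) {
          const int v = dst[r * dst_stride + c] + ((vt[4 * r + c] + 4) >> 3);
          dst[r * dst_stride + c] = static_cast<uint8_t>(std::min(255, std::max(0, v)));
        }
    }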
/third_party/node/deps/v8/src/codegen/arm64/
macro-assembler-arm64.h
1195 void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) { in St1() argument
1197 st1(vt, vt2, dst); in St1()
1199 void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, in St1() argument
1202 st1(vt, vt2, vt3, dst); in St1()
1204 void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, in St1() argument
1207 st1(vt, vt2, vt3, vt4, dst); in St1()
1636 void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { in Ld1() argument
1638 ld1(vt, vt2, src); in Ld1()
1640 void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, in Ld1() argument
1643 ld1(vt, vt2, vt3, src); in Ld1()
[all …]
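These MacroAssembler overloads forward to the raw st1/ld1 emitters for NEON multi-register structure loads and stores; as the DCHECKs in assembler-arm64.cc below show, the registers in a list must share a format and have consecutive codes. A hedged usage sketch (the register and addressing choices are illustrative, not taken from V8):

    // Sketch: assuming V8's arm64 codegen namespace and a MacroAssembler* masm.
    void EmitCopy32(MacroAssembler* masm) {
      // Load three consecutive registers from [x0], then store two of them,
      // post-incrementing the destination base by the 32 bytes written.
      masm->Ld1(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x0));
      masm->St1(v0.V16B(), v1.V16B(), MemOperand(x1, 32, PostIndex));
    }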
assembler-arm64.cc
2346 void Assembler::ld1(const VRegister& vt, const VRegister& vt2, in ld1() argument
2348 USE(vt2); in ld1()
2349 DCHECK(AreSameFormat(vt, vt2)); in ld1()
2350 DCHECK(AreConsecutive(vt, vt2)); in ld1()
2354 void Assembler::ld1(const VRegister& vt, const VRegister& vt2, in ld1() argument
2356 USE(vt2); in ld1()
2358 DCHECK(AreSameFormat(vt, vt2, vt3)); in ld1()
2359 DCHECK(AreConsecutive(vt, vt2, vt3)); in ld1()
2363 void Assembler::ld1(const VRegister& vt, const VRegister& vt2, in ld1() argument
2366 USE(vt2); in ld1()
[all …]
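Only vt is needed for the encoding (the rest of the list is implied by the instruction's register-count field, hence USE(vt2)), so the DCHECKs carry the real constraint: every register in the list has the same format and the codes are consecutive, wrapping from v31 back to v0, as the vixl benchmark below also relies on. A sketch of that consecutiveness rule (a re-statement, not V8's implementation):

    // Codes must increase by 1 modulo the 32 V registers (v31 wraps to v0).
    bool ConsecutiveCodes(int code1, int code2) {
      const int kNumberOfVRegisters = 32;
      return code2 == (code1 + 1) % kNumberOfVRegisters;
    }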
assembler-arm64.h
1227 void st1(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
1230 void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1234 void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1241 void st2(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
1244 void st2(const VRegister& vt, const VRegister& vt2, int lane,
1248 void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1252 void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1256 void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1260 void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1841 void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
[all …]
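Note the difference between the st1 and st2/st3/st4 families declared above: st1 stores each register's elements contiguously, while st2/st3/st4 interleave element i of every register in the list before element i+1 (the lane overloads with an `int lane` parameter store a single element from each register instead). A scalar model of what a two-register st2 writes, in B format (illustrative, not V8 code):

    #include <cstdint>

    // Memory effect of "st2 {v0.16b, v1.16b}, [x0]" in scalar terms.
    void St2Model16B(const uint8_t v0[16], const uint8_t v1[16], uint8_t* mem) {
      for (int i = 0; i < 16; ++i) {
        mem[2 * i + 0] = v0[i];  // elements of the two registers interleave
        mem[2 * i + 1] = v1[i];
      }
    }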
/third_party/vixl/src/aarch64/
assembler-aarch64.cc
2058 const VRegister& vt2, in ld1() argument
2060 USE(vt2); in ld1()
2062 VIXL_ASSERT(AreSameFormat(vt, vt2)); in ld1()
2063 VIXL_ASSERT(AreConsecutive(vt, vt2)); in ld1()
2069 const VRegister& vt2, in ld1() argument
2072 USE(vt2, vt3); in ld1()
2074 VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); in ld1()
2075 VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); in ld1()
2081 const VRegister& vt2, in ld1() argument
2085 USE(vt2, vt3, vt4); in ld1()
[all …]
macro-assembler-aarch64.h
3315 void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { in Ld1() argument
3318 ld1(vt, vt2, src); in Ld1()
3321 const VRegister& vt2, in Ld1() argument
3326 ld1(vt, vt2, vt3, src); in Ld1()
3329 const VRegister& vt2, in Ld1() argument
3335 ld1(vt, vt2, vt3, vt4, src); in Ld1()
3347 void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { in Ld2() argument
3350 ld2(vt, vt2, src); in Ld2()
3353 const VRegister& vt2, in Ld2() argument
3358 ld2(vt, vt2, lane, src); in Ld2()
[all …]
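VIXL's macro-assembler mirrors the V8 wrappers above, including the lane form of Ld2 that loads one structure into the same lane of both registers. A usage sketch in the `__` style of the vixl benchmark further down (registers and addresses are illustrative):

    // Assuming the vixl::aarch64 namespace and a MacroAssembler bound to __.
    __ Ld2(v0.V4S(), v1.V4S(), MemOperand(x0));     // de-interleave 8 words into v0/v1
    __ Ld2(v0.V4S(), v1.V4S(), 2, MemOperand(x1));  // fill only lane 2 of each register
    __ St1(v0.V4S(), v1.V4S(), MemOperand(x2));     // store the pair back contiguously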
assembler-aarch64.h
2874 void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
2878 const VRegister& vt2,
2884 const VRegister& vt2,
2896 void ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
2900 const VRegister& vt2,
2905 void ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
2909 const VRegister& vt2,
2915 const VRegister& vt2,
2922 const VRegister& vt2,
2928 const VRegister& vt2,
[all …]
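The ld2r ("load and replicate") form declared above reads a single two-element structure and broadcasts its first element to every lane of vt and its second to every lane of vt2. A scalar model in S format (illustrative):

    #include <cstdint>

    // Memory effect of "ld2r {v0.4s, v1.4s}, [x0]" in scalar terms.
    void Ld2rModel4S(const uint32_t* mem, uint32_t v0[4], uint32_t v1[4]) {
      for (int lane = 0; lane < 4; ++lane) {
        v0[lane] = mem[0];  // first element replicated across v0
        v1[lane] = mem[1];  // second element replicated across v1
      }
    }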
/third_party/vixl/benchmarks/aarch64/
bench-utils.cc
387 VRegister vt2((vt.GetCode() + 1) % kNumberOfVRegisters, kQRegSize); in GenerateNEONSequence() local
391 VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt2)); in GenerateNEONSequence()
394 __ Ld3(vt.V4S(), vt2.V4S(), vt3.V4S(), MemOperand(scratch)); in GenerateNEONSequence()
395 __ St4(vt.V16B(), vt2.V16B(), vt3.V16B(), vt4.V16B(), MemOperand(scratch)); in GenerateNEONSequence()
/third_party/skia/third_party/externals/libwebp/src/dsp/
enc_msa.c
47 v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; in ITransformOne() local
57 IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3); in ITransformOne()
58 SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); in ITransformOne()
59 TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); in ITransformOne()
65 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in ITransformOne()
dec_msa.c
45 v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; in TransformOne() local
55 IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3); in TransformOne()
56 SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); in TransformOne()
57 TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); in TransformOne()
63 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in TransformOne()
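Both libwebp files follow the same shape as the ffmpeg MSA code above: 1D transform, rounding shift, transpose, reconstruct. The TRANSPOSE4x4_SW_SW step is what lets a single vectorized 1D pass serve both directions; its scalar equivalent is just an in-place 4x4 matrix transpose (a sketch, names illustrative):

    #include <cstdint>
    #include <utility>

    // Scalar equivalent of TRANSPOSE4x4_SW_SW(vt0..vt3, vt0..vt3): in-place
    // transpose of four 4-word rows.
    void Transpose4x4(int32_t m[4][4]) {
      for (int r = 0; r < 4; ++r)
        for (int c = r + 1; c < 4; ++c) std::swap(m[r][c], m[c][r]);
    }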
/third_party/mesa3d/src/intel/compiler/
brw_clip_tri.c
529 struct brw_indirect vt2 = brw_indirect(2, 0); in brw_clip_test() local
539 brw_MOV(p, get_addr_reg(vt2), brw_address(c->reg.vertex[2])); in brw_clip_test()
542 brw_MOV(p, v2, deref_4f(vt2, hpos_offset)); in brw_clip_test()
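Unlike the other hits, vt2 here is an address rather than data: brw_indirect names a pointer held in the EU's address register file, brw_MOV loads it with the location of vertex 2's payload, and deref_4f then reads a 4-float field (the clip-space position at hpos_offset) through it. Roughly, in plain C++ terms (a hypothetical analogy, not the brw API):

    // Hypothetical analogy for the indirect vertex access above (not the brw API).
    struct Vertex { float attrs[64]; };      // payload layout illustrative

    // Stands in for deref_4f(vt2, offset): a vec4 read through the pointer.
    const float* Deref4f(const Vertex* vt2, int offset) { return vt2->attrs + offset; }

    void ClipTestSketch(const Vertex verts[3]) {
      const Vertex* vt2 = &verts[2];        // brw_MOV(get_addr_reg(vt2), brw_address(...))
      const int hpos_offset = 0;            // illustrative
      const float* v2 = Deref4f(vt2, hpos_offset);  // the vertex's clip-space position
      (void)v2;
    }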
/third_party/vixl/doc/aarch64/
supported-instructions-aarch64.md
4374 const VRegister& vt2,
4392 const VRegister& vt2,
4401 void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src)
4416 const VRegister& vt2,
4425 void ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src)
4432 void ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src)
4440 const VRegister& vt2,
4451 const VRegister& vt2,
4461 const VRegister& vt2,
4471 const VRegister& vt2,
[all …]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
SVEInstrFormats.td
333 ValueType vt2, Instruction inst>
334 : Pat<(vtd (op vt1:$Op1, vt2:$Op2)),
338 ValueType vt2, Instruction inst, SubRegIndex sub>
339 : Pat<(vtd (op vt1:$Op1, vt2:$Op2)),
343 ValueType vt2, ValueType vt3, Instruction inst>
344 : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
348 ValueType vt2, ValueType vt3, ValueType vt4,
350 : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3, vt4:$Op4)),
354 ValueType vt2, Operand ImmTy, Instruction inst>
355 : Pat<(vtd (op vt1:$Op1, (vt2 ImmTy:$Op2))),
[all …]