/external/vixl/test/aarch64/
test-assembler-fp-aarch64.cc
     300  __ Fmov(tgt2, fp_tgt);                     in LoadFPValueHelper() local
     332  __ Fmov(s1, 255.0);                        in TEST() local
     333  __ Fmov(d2, 12.34567);                     in TEST() local
     334  __ Fmov(s3, 0.0);                          in TEST() local
     335  __ Fmov(d4, 0.0);                          in TEST() local
     336  __ Fmov(s5, kFP32PositiveInfinity);        in TEST() local
     337  __ Fmov(d6, kFP64NegativeInfinity);        in TEST() local
     338  __ Fmov(h7, RawbitsToFloat16(0x6400U));    in TEST() local
     339  __ Fmov(h8, kFP16PositiveInfinity);        in TEST() local
     340  __ Fmov(s11, 1.0);                         in TEST() local
     [all …]
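The references above exercise the scalar immediate forms of Fmov across H, S and D registers, including infinities and raw half-precision bit patterns. A minimal sketch of the same pattern outside the test harness; the helper name EmitScalarFmovImmediates is hypothetical, the registers, constants and helpers are the ones that appear in the listing, and the VIXL src/ directory is assumed to be on the include path:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl;
    using namespace vixl::aarch64;

    // Hypothetical helper: move FP immediates into scalar registers, as the
    // test-assembler-fp-aarch64.cc references above do.
    void EmitScalarFmovImmediates(MacroAssembler* masm) {
      masm->Fmov(s1, 255.0);                      // single precision
      masm->Fmov(d2, 12.34567);                   // double precision
      masm->Fmov(h7, RawbitsToFloat16(0x6400U));  // half precision, from raw bits
      masm->Fmov(s5, kFP32PositiveInfinity);      // special values work too
      masm->Fmov(d6, kFP64NegativeInfinity);
    }

In the tests themselves, `__` is the usual shorthand macro for the MacroAssembler instance, so `__ Fmov(...)` and `masm->Fmov(...)` are the same call.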
test-assembler-neon-aarch64.cc
    3681  __ Fmov(v0.V4H(), 24.0);                      in TEST() local
    3682  __ Fmov(v1.V4H(), 1024.0);                    in TEST() local
    3683  __ Fmov(v2.V8H(), 5.5);                       in TEST() local
    3684  __ Fmov(v3.V8H(), 2048.0);                    in TEST() local
    3685  __ Fmov(v4.V8H(), kFP16PositiveInfinity);     in TEST() local
    3686  __ Fmov(v5.V8H(), kFP16NegativeInfinity);     in TEST() local
    3687  __ Fmov(v6.V4H(), RawbitsToFloat16(0x7c2f));  in TEST() local
    3688  __ Fmov(v7.V8H(), RawbitsToFloat16(0xfe0f));  in TEST() local
    3723  __ Fmov(v0.V4H(), 24.0);                      in TEST() local
    3724  __ Fmov(v1.V4H(), 1024.0);                    in TEST() local
    [all …]
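The NEON forms broadcast one FP immediate into every lane of a vector register; V4H and V8H select four or eight half-precision lanes. A small sketch using the same calls as the listing, under the same includes and using-directives as the first sketch (the wrapper name is an assumption):

    // Hypothetical wrapper around the vector-immediate calls shown above.
    void EmitVectorFmovImmediates(MacroAssembler* masm) {
      masm->Fmov(v0.V4H(), 24.0);                      // 4 half-precision lanes
      masm->Fmov(v2.V8H(), 5.5);                       // 8 half-precision lanes
      masm->Fmov(v4.V8H(), kFP16PositiveInfinity);     // broadcast +inf to every lane
      masm->Fmov(v6.V4H(), RawbitsToFloat16(0x7c2f));  // broadcast a raw NaN pattern
    }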
test-utils-aarch64.cc
     579  __ Fmov(dn, value);                        in ClobberFP() local
     584  __ Fmov(dn, first);                        in ClobberFP() local
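ClobberFP() fills the selected FP/SIMD registers with a recognisable value before a test so that stale register state cannot be mistaken for a result: the first register is loaded from the immediate (line 579), and the remaining ones are filled with cheaper register-to-register copies of it (line 584). A hedged sketch of that pattern with a fixed register list instead of the real RegList walk (the function name and register choices are assumptions):

    // Sketch only: materialise the immediate once, then copy it around.
    void ClobberSomeFPRegisters(MacroAssembler* masm, double value) {
      masm->Fmov(d10, value);  // load the immediate once
      masm->Fmov(d11, d10);    // register-to-register copies for the rest
      masm->Fmov(d12, d10);
    }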
test-assembler-aarch64.cc
    7795  __ Fmov(d1, x1);                           in TEST() local
    7796  __ Fmov(d2, x2);                           in TEST() local
    7797  __ Fmov(d3, x3);                           in TEST() local
    7798  __ Fmov(d4, x4);                           in TEST() local
    7863  __ Fmov(d1, x1);                           in TEST() local
    7864  __ Fmov(d2, x2);                           in TEST() local
    7865  __ Fmov(d3, x3);                           in TEST() local
    7866  __ Fmov(d4, x4);                           in TEST() local
    7867  __ Fmov(d5, x1);                           in TEST() local
    7868  __ Fmov(d6, x2);                           in TEST() local
    [all …]
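These references use the register-to-register form that copies the raw 64-bit pattern of an X register into a D register unchanged, which lets a test hand an exact bit pattern to the FP side. A minimal sketch, same includes as above (the function name and the chosen bit pattern are illustrative assumptions):

    // Hypothetical example: move raw bits between the integer and FP register files.
    void MoveRawBitsThroughFmov(MacroAssembler* masm) {
      masm->Mov(x1, 0x7ff0000000000001);  // a NaN bit pattern
      masm->Fmov(d1, x1);                 // d1 now holds exactly those bits
      masm->Fmov(x2, d1);                 // and they can be read back unchanged
    }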
test-disasm-neon-aarch64.cc
    3102  COMPARE_MACRO(Fmov(v0.V2S(), 1.0f), "fmov v0.2s, #0x70 (1.0000)");              in TEST()
    3103  COMPARE_MACRO(Fmov(v31.V2S(), -13.0f), "fmov v31.2s, #0xaa (-13.0000)");         in TEST()
    3104  COMPARE_MACRO(Fmov(v0.V4S(), 1.0f), "fmov v0.4s, #0x70 (1.0000)");               in TEST()
    3105  COMPARE_MACRO(Fmov(v31.V4S(), -13.0f), "fmov v31.4s, #0xaa (-13.0000)");         in TEST()
    3106  COMPARE_MACRO(Fmov(v1.V2D(), 1.0), "fmov v1.2d, #0x70 (1.0000)");                in TEST()
    3107  COMPARE_MACRO(Fmov(v29.V2D(), -13.0), "fmov v29.2d, #0xaa (-13.0000)");          in TEST()
    3109  COMPARE_MACRO(Fmov(v0.V4H(), Float16(-5.0f)), "fmov v0.4h, #0x94 (-5.0000)");    in TEST()
    3110  COMPARE_MACRO(Fmov(v31.V8H(), Float16(29.0f)),                                   in TEST()
    3112  COMPARE_MACRO(Fmov(v0.V4H(), Float16(-5.0)), "fmov v0.4h, #0x94 (-5.0000)");     in TEST()
    3113  COMPARE_MACRO(Fmov(v31.V8H(), Float16(29.0)), "fmov v31.8h, #0x3d (29.0000)");   in TEST()
    [all …]
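The COMPARE_MACRO expectations check that the disassembler prints both the encoded 8-bit immediate and its decoded value for the vector fmov forms. Roughly the same check can be reproduced outside the test harness by running VIXL's Decoder with a Disassembler visitor over the emitted instruction; a sketch, assuming the buffer accessor below behaves as in current VIXL sources:

    #include <cstdio>
    #include "aarch64/disasm-aarch64.h"

    // Sketch: emit one vector fmov and print VIXL's disassembly of it.
    void PrintVectorFmovDisassembly() {
      MacroAssembler masm;
      masm.Fmov(v0.V4S(), 1.0f);  // expected text: "fmov v0.4s, #0x70 (1.0000)"
      masm.FinalizeCode();

      Decoder decoder;
      Disassembler disasm;
      decoder.AppendVisitor(&disasm);
      decoder.Decode(masm.GetBuffer()->GetStartAddress<Instruction*>());
      printf("%s\n", disasm.GetOutput());
    }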
test-disasm-sve-aarch64.cc
    3024  COMPARE_MACRO(Fmov(z13.VnD(), p0.Merging(), 1.0),                                in TEST()
    3026  COMPARE_MACRO(Fmov(z13.VnD(), p0.Merging(), 0.0), "mov z13.d, p0/m, #0");        in TEST()
    3031  COMPARE_MACRO(Fmov(z13.VnD(), 1.0), "fmov z13.d, #0x70 (1.0000)");               in TEST()
    3032  COMPARE_MACRO(Fmov(z13.VnD(), 0.0), "mov z13.d, #0");                            in TEST()
    3035  COMPARE_MACRO(Fmov(z13.VnD(), p0.Merging(), -0.0),                               in TEST()
    3038  COMPARE_MACRO(Fmov(z13.VnD(), -0.0), "mov z13.d, #0x8000000000000000");          in TEST()
    3260  COMPARE_MACRO(Fmov(z26.VnH(), Float16(0.0)), "mov z26.h, #0");                   in TEST()
    3261  COMPARE_MACRO(Fmov(z26.VnH(), Float16(0.0)), "mov z26.h, #0");                   in TEST()
    3262  COMPARE_MACRO(Fmov(z27.VnS(), 255.0f),                                           in TEST()
    3265  COMPARE_MACRO(Fmov(z28.VnD(), 12.3456),                                          in TEST()
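Two details stand out in these expectations: the predicated form (p0/m) writes only the active lanes, and immediates that cannot be encoded in the 8-bit FP immediate field (0.0, -0.0) are emitted as integer mov aliases of the raw bit pattern instead. A short sketch using the same calls, same includes as above (the function name is an assumption):

    // Sketch: unpredicated vs. merging-predicated SVE Fmov with an immediate.
    void EmitSveFmovImmediates(MacroAssembler* masm) {
      masm->Fmov(z13.VnD(), 1.0);                 // "fmov z13.d, #0x70 (1.0000)"
      masm->Fmov(z13.VnD(), 0.0);                 // 0.0 is not FP8-encodable: "mov z13.d, #0"
      masm->Fmov(z13.VnD(), p0.Merging(), -0.0);  // only p0-active lanes receive 0x8000000000000000
    }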
test-assembler-sve-aarch64.cc
      366  __ Fmov(d30, 42.0);                                                              in TEST_SVE() local
      385  __ Fmov(s2, 1.5f);                                                               in TEST_SVE() local
      386  __ Fmov(d3, d30);                                                                in TEST_SVE() local
     5662  __ Fmov(d0, RawbitsToDouble(0x7ffaaaaa22223456));                                in TEST_SVE() local
     6710  __ Fmov(z9.VnD(), pg.Merging(), -9.0);                                           in TEST_SVE() local
     6719  __ Fmov(z14.VnS(), pg.Merging(), 0.0);                                           in TEST_SVE() local
     6720  __ Fmov(z15.VnH(), pg.Merging(), Float16(42.0));                                 in TEST_SVE() local
     6721  __ Fmov(z16.VnD(), pg.Merging(), RawbitsToDouble(0x7ff0000012340000));  // NaN   in TEST_SVE() local
     6722  __ Fmov(z17.VnH(), pg.Merging(), kFP64NegativeInfinity);                         in TEST_SVE() local
    16340  __ Fmov(s2, 2.0);                                                                in TEST_SVE() local
    [all …]
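The SVE assembler tests combine the scalar forms with merging predication and use RawbitsToDouble() to inject exact bit patterns such as NaN payloads. A hedged sketch of that combination, same includes as above; the Ptrue setup, the predicate and register choices, and the function name are assumptions, while the Fmov calls mirror the listing:

    // Sketch: predicated SVE Fmov with exact bit patterns.
    void EmitSveNaNPatterns(MacroAssembler* masm) {
      masm->Ptrue(p1.VnB());  // activate every lane, whatever the element size
      masm->Fmov(d0, RawbitsToDouble(0x7ffaaaaa22223456));                       // scalar NaN with a payload
      masm->Fmov(z16.VnD(), p1.Merging(), RawbitsToDouble(0x7ff0000012340000));  // NaN into active D lanes
      masm->Fmov(z17.VnH(), p1.Merging(), kFP64NegativeInfinity);                // narrowed to FP16 -inf
    }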
/external/vixl/src/aarch64/
macro-assembler-aarch64.cc
    1494  Fmov(tmp, value);                                        in Fcmp()
    1507  void MacroAssembler::Fmov(VRegister vd, double imm) {    in Fmov() function in vixl::aarch64::MacroAssembler
    1513  Fmov(vd, Float16(imm));                                  in Fmov()
    1518  Fmov(vd, static_cast<float>(imm));                       in Fmov()
    1544  void MacroAssembler::Fmov(VRegister vd, float imm) {     in Fmov() function in vixl::aarch64::MacroAssembler
    1550  Fmov(vd, Float16(imm));                                  in Fmov()
    1555  Fmov(vd, static_cast<double>(imm));                      in Fmov()
    1581  void MacroAssembler::Fmov(VRegister vd, Float16 imm) {   in Fmov() function in vixl::aarch64::MacroAssembler
    1586  Fmov(vd, FPToFloat(imm, kIgnoreDefaultNaN));             in Fmov()
    1591  Fmov(vd, FPToDouble(imm, kIgnoreDefaultNaN));            in Fmov()
    [all …]
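Taken together, the three immediate overloads dispatch on the destination's lane size: a double headed for an H or S register is first narrowed to Float16 or float (lines 1513, 1518), a float headed for an H or D register is converted likewise (lines 1550, 1555), and a Float16 headed for an S or D register is widened with FPToFloat/FPToDouble (lines 1586, 1591). A simplified restatement of that dispatch as a free function; the name is hypothetical, and the real overloads additionally choose between an encodable FP8 immediate, an integer move of the raw bits, and a literal-pool load:

    // Simplified sketch of the size-based dispatch visible above; the real
    // MacroAssembler::Fmov overloads perform it internally.
    void FmovDoubleImmediate(MacroAssembler* masm, VRegister vd, double imm) {
      if (vd.Is1H() || vd.Is4H() || vd.Is8H()) {
        masm->Fmov(vd, Float16(imm));             // half-precision destination
      } else if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
        masm->Fmov(vd, static_cast<float>(imm));  // single-precision destination
      } else {
        masm->Fmov(vd, imm);                      // double-precision destination
      }
    }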
macro-assembler-aarch64.h
    1577  void Fmov(const VRegister& vd, const VRegister& vn) {              in Fmov() function
    1583  void Fmov(const VRegister& vd, const Register& rn) {               in Fmov() function
    1589  void Fmov(const VRegister& vd, int index, const Register& rn) {    in Fmov() function
    1598  void Fmov(const Register& rd, const VRegister& vn, int index) {    in Fmov() function
    1612  void Fmov(VRegister vd, double imm);
    1613  void Fmov(VRegister vd, float imm);
    1614  void Fmov(VRegister vd, const Float16 imm);
    1617  void Fmov(VRegister vd, T imm) {                                   in Fmov() function
    1619  Fmov(vd, static_cast<double>(imm));                                in Fmov()
    1621  void Fmov(Register rd, VRegister vn) {                             in Fmov() function
    [all …]
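The header collects all of the Fmov entry points: register-to-register copies, moves between the general-purpose and FP register files (including the indexed element forms at lines 1589 and 1598), the three immediate overloads, and a template that routes integral immediates through the double overload (line 1619). A short sketch exercising the non-indexed forms, same includes as above (the function name is an assumption):

    // Sketch: the non-indexed Fmov overloads declared in macro-assembler-aarch64.h.
    void ExerciseFmovOverloads(MacroAssembler* masm) {
      masm->Fmov(d0, d1);  // VRegister <- VRegister copy
      masm->Fmov(d2, x0);  // VRegister <- Register, raw bits
      masm->Fmov(x1, d2);  // Register  <- VRegister, raw bits
      masm->Fmov(s3, 2);   // integral immediate, routed via the template to the double overload
    }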
/external/vixl/benchmarks/aarch64/
bench-utils.cc
     372  __ Fmov(PickV(size), 1.25 * GetRandomBits(2));    in GenerateFPSequence() local
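The benchmark materialises one of four small immediates (multiples of 1.25) into a vector register chosen by PickV(), so the generated FP sequence varies between runs. With the randomness stubbed out the call reduces to an ordinary immediate move; a sketch, same includes as above, where the parameter stands in for GetRandomBits(2) and the register choice is fixed:

    // Sketch with the benchmark's randomness replaced by a parameter.
    void EmitBenchmarkStyleFmov(MacroAssembler* masm, unsigned two_bits) {
      masm->Fmov(d0, 1.25 * two_bits);  // one of 0.0, 1.25, 2.5 or 3.75
    }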