/external/vixl/test/aarch64/
D | test-assembler-fp-aarch64.cc
      299   __ Fmov(tgt2, fp_tgt);                    in LoadFPValueHelper() local
      331   __ Fmov(s1, 255.0);                       in TEST() local
      332   __ Fmov(d2, 12.34567);                    in TEST() local
      333   __ Fmov(s3, 0.0);                         in TEST() local
      334   __ Fmov(d4, 0.0);                         in TEST() local
      335   __ Fmov(s5, kFP32PositiveInfinity);       in TEST() local
      336   __ Fmov(d6, kFP64NegativeInfinity);       in TEST() local
      337   __ Fmov(h7, RawbitsToFloat16(0x6400U));   in TEST() local
      338   __ Fmov(h8, kFP16PositiveInfinity);       in TEST() local
      339   __ Fmov(s11, 1.0);                        in TEST() local
      [all …]
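
The entries above exercise the scalar-immediate overloads. A minimal sketch of the same pattern outside the test harness, assuming VIXL's public MacroAssembler API and the tests' usual "#define __ masm." shorthand; the helper name EmitScalarFmovs is illustrative, not from the source:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl;
    using namespace vixl::aarch64;

    #define __ masm.

    // Illustrative helper: materialize FP immediates into scalar registers.
    void EmitScalarFmovs(MacroAssembler& masm) {
      __ Fmov(s11, 1.0);                   // Fits the FMOV imm8 encoding: one instruction.
      __ Fmov(d2, 12.34567);               // Not encodable; the macro typically falls back
                                           // to a literal load or a rawbits move.
      __ Fmov(s3, 0.0);                    // +0.0 is not encodable either and is usually
                                           // moved from the zero register instead.
      __ Fmov(d6, kFP64NegativeInfinity);  // Specials are just bit patterns here.
    }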
D | test-assembler-neon-aarch64.cc
      3680  __ Fmov(v0.V4H(), 24.0);                      in TEST() local
      3681  __ Fmov(v1.V4H(), 1024.0);                    in TEST() local
      3682  __ Fmov(v2.V8H(), 5.5);                       in TEST() local
      3683  __ Fmov(v3.V8H(), 2048.0);                    in TEST() local
      3684  __ Fmov(v4.V8H(), kFP16PositiveInfinity);     in TEST() local
      3685  __ Fmov(v5.V8H(), kFP16NegativeInfinity);     in TEST() local
      3686  __ Fmov(v6.V4H(), RawbitsToFloat16(0x7c2f));  in TEST() local
      3687  __ Fmov(v7.V8H(), RawbitsToFloat16(0xfe0f));  in TEST() local
      3722  __ Fmov(v0.V4H(), 24.0);                      in TEST() local
      3723  __ Fmov(v1.V4H(), 1024.0);                    in TEST() local
      [all …]
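
With a vector destination the same immediate overloads splat the value across every lane, which is what these FP16 tests rely on. A hedged sketch, assuming the same includes and shorthand as the sketch above; EmitVectorFmovs is an illustrative name:

    // Illustrative helper: broadcast FP16 immediates across NEON lanes.
    void EmitVectorFmovs(MacroAssembler& masm) {
      __ Fmov(v0.V4H(), 24.0);                   // 24.0 duplicated into four half lanes.
      __ Fmov(v2.V8H(), 5.5);                    // 5.5 duplicated into all eight lanes.
      __ Fmov(v4.V8H(), kFP16PositiveInfinity);  // Special values splat the same way.
    }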
D | test-assembler-aarch64.cc
      8351  __ Fmov(d1, x1);   in TEST() local
      8352  __ Fmov(d2, x2);   in TEST() local
      8353  __ Fmov(d3, x3);   in TEST() local
      8354  __ Fmov(d4, x4);   in TEST() local
      8419  __ Fmov(d1, x1);   in TEST() local
      8420  __ Fmov(d2, x2);   in TEST() local
      8421  __ Fmov(d3, x3);   in TEST() local
      8422  __ Fmov(d4, x4);   in TEST() local
      8423  __ Fmov(d5, x1);   in TEST() local
      8424  __ Fmov(d6, x2);   in TEST() local
      [all …]
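
Fmov between a general-purpose and an FP/SIMD register copies raw bits with no numeric conversion, which is why these tests use it to plant exact bit patterns in d1..d6. A sketch of the round trip plus the lane forms declared in the header below; EmitBitwiseMoves is an illustrative name, same assumptions as above:

    // Illustrative helper: bitwise moves between X and D registers, plus lanes.
    void EmitBitwiseMoves(MacroAssembler& masm) {
      __ Fmov(d1, x1);           // Copy 64 raw bits, X -> D; no conversion.
      __ Fmov(x2, d1);           // And back, D -> X.
      __ Fmov(v0.V2D(), 1, x3);  // Write the upper 64-bit lane of v0.
      __ Fmov(x4, v0.V2D(), 1);  // Read the upper lane back.
    }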
D | test-utils-aarch64.cc
      619   __ Fmov(dn, value);   in ClobberFP() local
      624   __ Fmov(dn, first);   in ClobberFP() local
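
ClobberFP overwrites the FP registers with known values so a test can tell when a register it expected to set still holds the clobber value. A loose sketch of the idea only; the register iteration, the GetDRegFromCode helper, and the function name are assumptions, not the test-utils implementation:

    // Illustrative only: fill the D registers with a marker value.
    void ClobberSomeFPRegs(MacroAssembler& masm, double value) {
      for (unsigned code = 0; code < kNumberOfVRegisters; code++) {
        __ Fmov(VRegister::GetDRegFromCode(code), value);  // Same value everywhere.
      }
    }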
D | test-assembler-sve-aarch64.cc
      300    __ Fmov(d30, 42.0);                                              in TEST_SVE() local
      319    __ Fmov(s2, 1.5f);                                               in TEST_SVE() local
      320    __ Fmov(d3, d30);                                                in TEST_SVE() local
      5602   __ Fmov(d0, RawbitsToDouble(0x7ffaaaaa22223456));                in TEST_SVE() local
      6650   __ Fmov(z9.VnD(), pg.Merging(), -9.0);                           in TEST_SVE() local
      6659   __ Fmov(z14.VnS(), pg.Merging(), 0.0);                           in TEST_SVE() local
      6660   __ Fmov(z15.VnH(), pg.Merging(), Float16(42.0));                 in TEST_SVE() local
      6661   __ Fmov(z16.VnD(), pg.Merging(), RawbitsToDouble(0x7ff0000012340000));  // NaN   in TEST_SVE() local
      6662   __ Fmov(z17.VnH(), pg.Merging(), kFP64NegativeInfinity);         in TEST_SVE() local
      16285  __ Fmov(s2, 2.0);                                                in TEST_SVE() local
      [all …]
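
The SVE overloads take a Z register with a lane-size qualifier and, optionally, a merging predicate, in which case inactive lanes keep their previous contents. A hedged sketch with the same assumptions as above; EmitSveFmovs is an illustrative name:

    // Illustrative helper: unpredicated and predicated SVE moves.
    void EmitSveFmovs(MacroAssembler& masm, const PRegister& pg) {
      __ Fmov(z0.VnS(), 1.5f);                          // Every 32-bit lane becomes 1.5f.
      __ Fmov(z9.VnD(), pg.Merging(), -9.0);            // Only lanes active in pg change.
      __ Fmov(z15.VnH(), pg.Merging(), Float16(42.0));  // FP16 lanes, merging.
    }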
/external/vixl/examples/aarch64/
D | simulator_interception.cc
      74    __ Fmov(s0, 3.5);   in GenerateInterceptionExamples() local
/external/vixl/benchmarks/aarch64/
D | bench-utils.cc
      373   __ Fmov(PickV(size), 1.25 * GetRandomBits(2));   in GenerateFPSequence() local
/external/vixl/src/aarch64/
D | macro-assembler-aarch64.cc
      1546  void MacroAssembler::Fmov(VRegister vd, double imm) {   in Fmov() function in vixl::aarch64::MacroAssembler
      1582  void MacroAssembler::Fmov(VRegister vd, float imm) {    in Fmov() function in vixl::aarch64::MacroAssembler
      1618  void MacroAssembler::Fmov(VRegister vd, Float16 imm) {  in Fmov() function in vixl::aarch64::MacroAssembler
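
These three definitions dispatch on the destination's lane size and on whether the immediate fits the architectural FMOV (immediate) encoding. A condensed paraphrase of the double-precision decision, written as a classifier rather than as emitted code; this is a reading of the logic, not the VIXL source:

    // Illustrative paraphrase of the strategy chosen by Fmov(VRegister, double).
    const char* FmovDoubleStrategy(const VRegister& vd, double imm) {
      if (vd.Is1H() || vd.Is4H() || vd.Is8H()) return "re-dispatch as Float16";
      if (vd.Is1S() || vd.Is2S() || vd.Is4S()) return "re-dispatch as float";
      if (Assembler::IsImmFP64(imm)) return "single FMOV (immediate)";
      if (DoubleToRawbits(imm) == 0) return "move from the zero register";
      return vd.IsScalar() ? "load via literal" : "materialize rawbits";
    }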
D | macro-assembler-aarch64.h
      1574  void Fmov(const VRegister& vd, const VRegister& vn) {               in Fmov() function
      1580  void Fmov(const VRegister& vd, const Register& rn) {                in Fmov() function
      1586  void Fmov(const VRegister& vd, int index, const Register& rn) {     in Fmov() function
      1595  void Fmov(const Register& rd, const VRegister& vn, int index) {     in Fmov() function
      1614  void Fmov(VRegister vd, T imm) {                                    in Fmov() function
      1618  void Fmov(Register rd, VRegister vn) {                              in Fmov() function
      4618  void Fmov(const ZRegister& zd, double imm) {                        in Fmov() function
      4622  void Fmov(const ZRegister& zd, float imm) {                         in Fmov() function
      4626  void Fmov(const ZRegister& zd, Float16 imm) {                       in Fmov() function
      4630  void Fmov(const ZRegister& zd, const PRegisterM& pg, double imm) {  in Fmov() function
      [all …]
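
Taken together, the header covers register-to-register moves, GP<->FP bit copies, lane accesses, immediates (including a template overload), and the SVE forms. A quick tour touching one call per overload family, same assumptions as the sketches above; FmovOverloadTour is an illustrative name:

    // Illustrative helper: one call per Fmov overload family.
    void FmovOverloadTour(MacroAssembler& masm) {
      __ Fmov(d0, d1);                       // FP -> FP register move.
      __ Fmov(d0, x0);                       // GP -> FP, raw bits.
      __ Fmov(x1, d0);                       // FP -> GP, raw bits.
      __ Fmov(v0.V2D(), 1, x2);              // GP -> vector lane.
      __ Fmov(x3, v0.V2D(), 1);              // Vector lane -> GP.
      __ Fmov(s0, 1.0f);                     // Scalar immediate.
      __ Fmov(z0.VnS(), 1.5f);               // SVE, unpredicated.
      __ Fmov(z1.VnD(), p0.Merging(), 0.0);  // SVE, predicated (merging).
    }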