/third_party/vixl/test/aarch64/
D | test-assembler-neon-aarch64.cc
    2212  __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);  in TEST() local
    2213  __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);  in TEST() local
    2305  __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);  in TEST() local
    2306  __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);  in TEST() local
    2307  __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);  in TEST() local
    2404  __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);  in TEST() local
    2405  __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);  in TEST() local
    2406  __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);  in TEST() local
    2407  __ Movi(v3.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);  in TEST() local
    3464  __ Movi(v0.V2D(), 0, 0x2222222233333333);  in TEST() local
    [all …]
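
These tests seed NEON input registers with full 128-bit constants through the two-argument Movi overload before exercising the instruction under test. A minimal sketch of the harness shape these hits share (SETUP_WITH_FEATURES, START/END, CAN_RUN/RUN, and ASSERT_EQUAL_128 are vixl's test macros; the test name is illustrative):

    TEST(movi_v2d_example) {
      SETUP_WITH_FEATURES(CPUFeatures::kNEON);
      START();
      // Seed a 128-bit value: high 64 bits first, then low 64 bits.
      __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
      END();
      if (CAN_RUN()) {
        RUN();
        // Compare both 64-bit halves of q0 against the expected value.
        ASSERT_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
      }
    }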
|
D | test-disasm-neon-aarch64.cc
    3104  COMPARE_MACRO(Movi(v4.V8B(), 0xaa), "movi v4.8b, #0xaa");  in TEST()
    3105  COMPARE_MACRO(Movi(v1.V16B(), 0xcc), "movi v1.16b, #0xcc");  in TEST()
    3107  COMPARE_MACRO(Movi(v4.V4H(), 0xaa, LSL, 0), "movi v4.4h, #0xaa, lsl #0");  in TEST()
    3108  COMPARE_MACRO(Movi(v1.V8H(), 0xcc, LSL, 8), "movi v1.8h, #0xcc, lsl #8");  in TEST()
    3110  COMPARE_MACRO(Movi(v4.V2S(), 0xaa, LSL, 0), "movi v4.2s, #0xaa, lsl #0");  in TEST()
    3111  COMPARE_MACRO(Movi(v1.V2S(), 0xcc, LSL, 8), "movi v1.2s, #0xcc, lsl #8");  in TEST()
    3112  COMPARE_MACRO(Movi(v4.V4S(), 0xaa, LSL, 16), "movi v4.4s, #0xaa, lsl #16");  in TEST()
    3113  COMPARE_MACRO(Movi(v1.V4S(), 0xcc, LSL, 24), "movi v1.4s, #0xcc, lsl #24");  in TEST()
    3115  COMPARE_MACRO(Movi(v4.V2S(), 0xaa, MSL, 8), "movi v4.2s, #0xaa, msl #8");  in TEST()
    3116  COMPARE_MACRO(Movi(v1.V2S(), 0xcc, MSL, 16), "movi v1.2s, #0xcc, msl #16");  in TEST()
    [all …]
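
The expectations above walk the per-lane-size immediate forms: plain byte replication for .8b/.16b, LSL-shifted immediates for the 16- and 32-bit lane forms, and MSL ("modified shift left", which fills the vacated low bits with ones) for .2s/.4s. A minimal sketch of emitting the same forms through the public vixl API (register choices are arbitrary):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitMoviImmediateForms(MacroAssembler* masm) {
      masm->Movi(v4.V8B(), 0xaa);          // movi v4.8b, #0xaa
      masm->Movi(v1.V8H(), 0xcc, LSL, 8);  // movi v1.8h, #0xcc, lsl #8
      // MSL shifts ones in from the right, so each 32-bit lane becomes
      // 0x0000aaff here, not 0x0000aa00.
      masm->Movi(v4.V2S(), 0xaa, MSL, 8);  // movi v4.2s, #0xaa, msl #8
    }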
|
D | test-simulator-aarch64.cc
    1509  __ Movi(vd.V16B(), 0);  in Test1OpNEON_Helper() local
    2464  __ Movi(vd.V16B(), 0);  in Test2OpImmNEON_Helper() local
|
D | test-assembler-sve-aarch64.cc
    19653  __ Movi(v0.V2D(), 0xffeeddccbbaa9988, 0x77665544332211);  in TEST_SVE() local
    19654  __ Movi(v1.V2D(), 0xaa5555aa55555555, 0x55aaaa55aaaaaa);  in TEST_SVE() local
    19655  __ Movi(v2.V2D(), 0, 0);  in TEST_SVE() local
    19656  __ Movi(v3.V2D(), 0, 0);  in TEST_SVE() local
    19657  __ Movi(v4.V2D(), 0, 0);  in TEST_SVE() local
    19658  __ Movi(v5.V2D(), 0, 0);  in TEST_SVE() local
    19659  __ Movi(v6.V2D(), 0, 0);  in TEST_SVE() local
    19660  __ Movi(v7.V2D(), 0, 0);  in TEST_SVE() local
|
/third_party/vixl/examples/aarch64/
D | neon-matrix-multiply.cc
    81  __ Movi(v0.V16B(), 0);  in GenerateNEONMatrixMultiply() local
    82  __ Movi(v1.V16B(), 0);  in GenerateNEONMatrixMultiply() local
    83  __ Movi(v2.V16B(), 0);  in GenerateNEONMatrixMultiply() local
    84  __ Movi(v3.V16B(), 0);  in GenerateNEONMatrixMultiply() local
|
/third_party/vixl/src/aarch64/
D | macro-assembler-aarch64.cc
    1163  void MacroAssembler::Movi(const VRegister& vd,  in Movi() function in vixl::aarch64::MacroAssembler
    1188  void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {  in Movi() function in vixl::aarch64::MacroAssembler
    1191  Movi(vd.V2D(), lo);  in Movi()
    1599  Movi(vd, rawbits);  in Fmov()
    1640  Movi(vd, rawbits);  in Fmov()
    1677  Movi(vd, static_cast<uint64_t>(rawbits));  in Fmov()
|
D | macro-assembler-aarch64.h
    3441  void Movi(const VRegister& vd,
    3445  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
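
These are the two public overloads: line 3441 opens the single-immediate form (the optional shift parameters are truncated by the snippet; the definition at macro-assembler-aarch64.cc:1163 above is the same function), and line 3445 takes a full 128-bit constant as two 64-bit halves. A minimal usage sketch assuming only the API shown here:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void LoadVectorConstants(MacroAssembler* masm) {
      // Single-immediate overload: the value is replicated per lane.
      masm->Movi(v0.V16B(), 0x55);
      // hi/lo overload: a 128-bit constant in one call. How an
      // unencodable value gets materialized is the macro's business;
      // the definitions referenced above hold that policy.
      masm->Movi(v1.V2D(), 0x0011223344556677, 0x8899aabbccddeeff);
    }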
|
/third_party/node/deps/v8/src/codegen/arm64/
D | macro-assembler-arm64-inl.h
    685  Movi(vd, bits);  in Fmov()
    712  Movi(vd, bits);  in Fmov()
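
Both hits are Fmov's fallback path: a floating-point immediate that does not fit FMOV's 8-bit encodable-immediate form is loaded through Movi from its raw bit pattern instead. A simplified sketch of that shape (illustrative only: IsEncodableFmovImm is a hypothetical stand-in for the real encodability check, and in the actual source this logic lives inside Fmov itself):

    #include <cstdint>
    #include <cstring>

    void FmovViaMovi(TurboAssembler* tasm, const VRegister& vd, double imm) {
      if (IsEncodableFmovImm(imm)) {  // hypothetical helper name
        tasm->Fmov(vd, imm);          // encodable: a single fmov d, #imm
      } else {
        uint64_t bits;
        std::memcpy(&bits, &imm, sizeof(bits));  // raw IEEE-754 bits
        tasm->Movi(vd, bits);         // matches lines 685 and 712 above
      }
    }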
|
D | macro-assembler-arm64.cc
    546  void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,  in Movi() function in v8::internal::TurboAssembler
    567  void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {  in Movi() function in v8::internal::TurboAssembler
    570  Movi(vd.V2D(), lo);  in Movi()
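
Line 570 shows the core of the hi/lo overload: a single replicated-lane Movi covers the low half, and when hi == lo it covers the whole register. A plausible shape for the rest, inferred from the call sites above and vixl's parallel definition at macro-assembler-aarch64.cc:1188 (a sketch, not the verbatim source):

    void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
      DCHECK(vd.Is128Bits());
      Movi(vd.V2D(), lo);  // line 570: both 64-bit lanes get 'lo'
      if (hi != lo) {
        // Halves differ: overwrite the upper lane through a scratch
        // general-purpose register.
        UseScratchRegisterScope temps(this);
        Register temp = temps.AcquireX();
        Mov(temp, hi);
        Ins(vd.V2D(), 1, temp);
      }
    }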
|
D | macro-assembler-arm64.h
    938  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
    940  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
|
/third_party/node/deps/v8/src/compiler/backend/arm64/
D | code-generator-arm64.cc
    2475  __ Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);  in AssembleArchInstruction() local
    2560  __ Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);  in AssembleArchInstruction() local
    2616  __ Movi(mask.V2D(), 0x8040'2010'0804'0201);  in AssembleArchInstruction() local
    2627  __ Movi(i.OutputSimd128Register().V16B(), imm2, imm1);  in AssembleArchInstruction() local
    2753  __ Movi(temp, imm2, imm1);  in AssembleArchInstruction() local
|
/third_party/node/deps/v8/src/wasm/baseline/arm64/
D | liftoff-assembler-arm64.h
    2234  Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);  in emit_i32x4_bitmask()
    2410  Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);  in emit_i16x8_bitmask()
    2551  Movi(temp.V16B(), imms[1], imms[0]);  in emit_i8x16_shuffle()
    2616  Movi(mask.V2D(), 0x8040'2010'0804'0201);  in emit_i8x16_bitmask()
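
The mask constants here (and in code-generator-arm64.cc above) give each lane the weight of its bit position in the result: 0x8040'2010'0804'0201 assigns byte lane i within each 64-bit half the weight 1 << (i % 8). A classic i8x16.bitmask lowering built on that constant, sketched against the vixl-style MacroAssembler API that V8's arm64 backend mirrors (a sketch of the general technique, not necessarily the exact sequence liftoff emits; register choices are arbitrary):

    using namespace vixl::aarch64;

    // Input in v0; 16-bit lane mask left in w0; v1 and v2 are clobbered.
    void EmitI8x16BitMask(MacroAssembler* masm) {
      masm->Sshr(v1.V16B(), v0.V16B(), 7);            // lane -> 0x00 or 0xff
      masm->Movi(v2.V2D(), 0x8040201008040201);       // per-lane bit weights
      masm->And(v1.V16B(), v1.V16B(), v2.V16B());     // keep one bit per lane
      masm->Ext(v2.V16B(), v1.V16B(), v1.V16B(), 8);  // rotate high half down
      masm->Zip1(v1.V16B(), v1.V16B(), v2.V16B());    // (low_i, high_i) pairs
      masm->Addv(h1, v1.V8H());                       // sum -> low | high << 8
      masm->Umov(w0, v1.H(), 0);                      // extract the 16-bit mask
    }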
|