
Searched refs:movsd (Results 1 – 25 of 37) sorted by relevance

/third_party/libffi/src/x86/
win64_intel.S:67 movsd XMM0, qword ptr [RSP] ; movsd (%rsp), %xmm0
69 movsd XMM1, qword ptr [RSP + 8] ; movsd 8(%rsp), %xmm1
71 movsd XMM2, qword ptr [RSP + 16] ; movsd 16(%rsp), %xmm2
73 movsd XMM3, qword ptr [RSP + 24] ; movsd 24(%rsp), %xmm3
108 movsd qword ptr [r8], xmm0 ; movsd %xmm0, (%r8)
213 movsd qword ptr [ffi_clo_OFF_X + rsp], xmm0 ; movsd %xmm0, ffi_clo_OFF_X(%rsp)
214 movsd qword ptr [ffi_clo_OFF_X + 8 + rsp], xmm1 ; movsd %xmm1, ffi_clo_OFF_X+8(%rsp)
215 movsd qword ptr [ffi_clo_OFF_X + 16 + rsp], xmm2 ; movsd %xmm2, ffi_clo_OFF_X+16(%rsp)
216 movsd qword ptr [ffi_clo_OFF_X + 24 + rsp], xmm3 ; movsd %xmm3, ffi_clo_OFF_X+24(%rsp)
224 movsd xmm0, qword ptr [rsp + ffi_clo_OFF_R] ; movsd ffi_clo_OFF_R(%rsp), %xmm0
win64.S:67 movsd (%rsp), %xmm0
69 movsd 8(%rsp), %xmm1
71 movsd 16(%rsp), %xmm2
73 movsd 24(%rsp), %xmm3
108 movsd %xmm0, (%r8)
215 movsd %xmm0, ffi_clo_OFF_X(%rsp)
216 movsd %xmm1, ffi_clo_OFF_X+8(%rsp)
217 movsd %xmm2, ffi_clo_OFF_X+16(%rsp)
218 movsd %xmm3, ffi_clo_OFF_X+24(%rsp)
225 movsd ffi_clo_OFF_R(%rsp), %xmm0
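
win64.S and win64_intel.S are the same libffi trampoline in AT&T and MASM syntax respectively; the ';' comments in the Intel file carry the AT&T equivalents. The four consecutive loads line up with the Windows x64 calling convention, which passes the first four floating-point arguments in XMM0 through XMM3. A minimal sketch of the idea, with illustrative stack offsets:

    movsd (%rsp), %xmm0     # 1st FP argument -> XMM0
    movsd 8(%rsp), %xmm1    # 2nd FP argument -> XMM1
    movsd 16(%rsp), %xmm2   # 3rd FP argument -> XMM2
    movsd 24(%rsp), %xmm3   # 4th FP argument -> XMM3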
/third_party/ffmpeg/libavcodec/x86/
mdct15.asm:42 movsd xm1, [inq + 1*16 + 8 + %1] ; in[ 3].re, in[ 3].im, 0, 0
43 movsd xm4, [inq + 6*16 + 0 + %1] ; in[12].re, in[12].im, 0, 0
89 movsd [outq], xm0
149 movsd xmm%1, [inq + r4q*8]
154 movsd %2, [inq + r4q*8]
aacpsdsp.asm:115 movsd [lq+nq], m2
163 movsd [lq+nq], m2
283 %define MOVH movsd
437 %define MOVH movsd
460 movsd m2, [inq+6*8]
sbrdsp.asm:458 %define MOVH movsd
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/
README-FPStack.txt:75 movsd 24(%esp), %xmm0
76 movsd %xmm0, 8(%esp)
README.txt:497 movsd 24(%esp), %xmm1
789 movsd 24(%esp), %xmm0
790 movsd %xmm0, 8(%esp)
885 movsd (%esp), %xmm0
887 movsd %xmm0, (%esp)
1016 movsd 176(%esp), %xmm2
1034 movsd 152(%esp), %xmm1
1036 movsd %xmm1, 152(%esp)
1041 movsd 152(%esp), %xmm0
1044 movsd %xmm0, 152(%esp)
README-SSE.txt:390 movsd 16(%esp), %xmm0
391 movsd %xmm0, (%esp)
716 Consider using movlps instead of movsd to implement (scalar_to_vector (loadf64))
717 when code size is critical. movlps is slower than movsd on core2 but it's one
749 movsd %xmm0, (%esp)
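
The README-SSE.txt entry quoted above (lines 716-717) is cut off mid-sentence; the trade-off it describes is that movlps lacks the F2 prefix movsd carries, making the load one byte shorter, while it merges into the destination instead of zeroing the upper lanes and so keeps a dependency on the register's old value. A sketch of the two load forms (byte counts assume an ESP-relative operand):

    movsd  (%esp), %xmm0    # F2 0F 10 04 24 (5 bytes); zeroes bits 127:64
    movlps (%esp), %xmm0    # 0F 12 04 24    (4 bytes); leaves bits 127:64
                            # unchanged, so the old %xmm0 value stays live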
/third_party/python/Modules/_ctypes/libffi_osx/x86/
darwin64.S:152 movsd %xmm0, (%rdi)
280 movsd -24(%rsp), %xmm0
/third_party/node/deps/v8/src/wasm/baseline/ia32/
liftoff-assembler-ia32.h:89 assm->movsd(dst.fp(), src); in Load()
117 assm->movsd(dst, src.fp()); in Store()
151 assm->movsd(Operand(esp, 0), reg.fp());
505 movsd(dst.fp(), src_op); in Load()
573 movsd(dst_op, src.fp()); in Store()
595 movsd(liftoff::kScratchDoubleReg, src_op); in AtomicLoad()
613 movsd(dst_op, liftoff::kScratchDoubleReg); in AtomicStore()
1194 movsd(dst, src); in Move()
1219 movsd(dst, reg.fp()); in Spill()
2078 if (dst != lhs) movsd(dst, lhs); in emit_f64_add()
[all …]
/third_party/node/deps/v8/src/codegen/ia32/
macro-assembler-ia32.cc:1037 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); in CallRecordWriteStub()
1080 movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); in CallRecordWriteStub()
1594 movsd(dst, Operand(esp, 0)); in CallRecordWriteStub()
1611 movsd(Operand(esp, 0), src); in CallRecordWriteStub()
1624 movsd(Operand(esp, 0), dst); in CallRecordWriteStub()
1633 movsd(dst, Operand(esp, 0)); in CallRecordWriteStub()
assembler-ia32.h:992 void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); } in movsd() function
993 void movsd(XMMRegister dst, Operand src);
994 void movsd(Operand dst, XMMRegister src);
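
Note how the header forwards the register-to-register overload through the Operand form, so the assembler needs only two encodings (F2 0F 10 load, F2 0F 11 store), with register-direct addressing covering the copy. In instruction terms the three declarations correspond to (operands illustrative):

    movsd %xmm1, %xmm0      # movsd(dst, src): register copy
    movsd (%esp), %xmm0     # movsd(dst, Operand): load
    movsd %xmm0, (%esp)     # movsd(Operand, src): store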
/third_party/elfutils/tests/
testfile44.expect.bz2
testfile45.expect.bz2:1 testfile45.o: elf64-elf_x86_64
3 Disassembly of section .text:
5 0 ...
/third_party/node/deps/v8/src/builtins/ia32/
builtins-ia32.cc:3848 __ movsd(xmm0, Operand(src, 0)); in Generate_MemMove()
3849 __ movsd(xmm1, Operand(src, count, times_1, -8)); in Generate_MemMove()
3850 __ movsd(Operand(dst, 0), xmm0); in Generate_MemMove()
3851 __ movsd(Operand(dst, count, times_1, -8), xmm1); in Generate_MemMove()
3992 __ movsd(Operand(esp, offset), xmm_reg); in Generate_DeoptimizationEntry()
4049 __ movsd(xmm0, Operand(esp, src_offset)); in Generate_DeoptimizationEntry()
4050 __ movsd(Operand(esi, dst_offset), xmm0); in Generate_DeoptimizationEntry()
4128 __ movsd(xmm_reg, Operand(esi, src_offset)); in Generate_DeoptimizationEntry()
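
The Generate_MemMove hits (lines 3848-3851) are the standard overlap-safe small-copy trick: for a move of 8 to 16 bytes, load both the first and the last qword before storing either, so the result is correct even when source and destination overlap. A minimal sketch, assuming illustrative register assignments:

    # copy count bytes (8 <= count <= 16) from src to dst, overlap-safe
    movsd (%eax), %xmm0            # first 8 bytes   (src in %eax)
    movsd -8(%eax,%ecx), %xmm1     # last 8 bytes    (count in %ecx)
    movsd %xmm0, (%edx)            # store first     (dst in %edx)
    movsd %xmm1, -8(%edx,%ecx)     # store last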
/third_party/ffmpeg/libavutil/x86/
lls.asm:117 movsd m0, [varq + iq*8]
x86util.asm:873 movsd %1, %2
x86inc.asm:1408 AVX_INSTR movsd, sse2, 1, 0, 0
/third_party/ffmpeg/libavfilter/x86/
vf_ssim.asm:257 movsd r0m, m0
/third_party/elfutils/libcpu/
i386.mnemonics:280 MNE(movsd)
x86_64.mnemonics:268 MNE(movsd)
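
A caveat on the elfutils hits: the mnemonic movsd is overloaded on x86. It names both the doubleword variant of the MOVS string instruction and the unrelated SSE2 scalar-double move, so a disassembler's mnemonic table has to carry it either way; the operands disambiguate (Intel syntax):

    movsd                   ; string op: copy a dword from [esi] to [edi]
    movsd xmm1, xmm2        ; SSE2: copy the low 64-bit double into xmm1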
/third_party/node/deps/v8/src/wasm/baseline/x64/
liftoff-assembler-x64.h:1675 if (dst != lhs) movsd(dst, lhs); in emit_f64_add()
1686 movsd(kScratchDoubleReg, rhs); in emit_f64_sub()
1687 movsd(dst, lhs); in emit_f64_sub()
1690 if (dst != lhs) movsd(dst, lhs); in emit_f64_sub()
1703 if (dst != lhs) movsd(dst, lhs); in emit_f64_mul()
1714 movsd(kScratchDoubleReg, rhs); in emit_f64_div()
1715 movsd(dst, lhs); in emit_f64_div()
1718 if (dst != lhs) movsd(dst, lhs); in emit_f64_div()
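
The emit_f64_sub and emit_f64_div hits show Liftoff's aliasing guard for non-commutative ops: when dst aliases rhs, rhs is first parked in the scratch register so that copying lhs into dst cannot clobber it. A sketch of the resulting sequence for the subtract case, with illustrative register numbers and the trailing subsd assumed from the function name:

    # dst == rhs case of emit_f64_sub (dst/rhs in %xmm1, lhs in %xmm0)
    movsd %xmm1, %xmm7      # scratch = rhs, before it is overwritten
    movsd %xmm0, %xmm1      # dst = lhs
    subsd %xmm7, %xmm1      # dst = lhs - rhs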
/third_party/node/deps/v8/src/compiler/backend/ia32/
code-generator-ia32.cc:401 __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
402 __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
408 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
416 __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
422 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
/third_party/gstreamer/gstplugins_good/gst/deinterlace/x86/
x86inc.asm:1394 AVX_INSTR movsd, sse2, 1, 0, 0
/third_party/skia/third_party/externals/libjpeg-turbo/simd/x86_64/
jchuff-sse2.asm:334 movsd xmm1, xmm2 ;B: w1 = 24 26 25 27 11 04 05 12
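
One detail the lane comment above relies on: with a register source, movsd merges rather than zeroes, replacing only the low 64 bits of the destination and preserving bits 127:64, so xmm1 keeps the upper half of its previous contents here. In isolation:

    movsd xmm1, xmm2        ; xmm1[63:0] = xmm2[63:0]; xmm1[127:64] unchanged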
