/third_party/libffi/src/x86/ |
D | win64_intel.S | 67 movsd XMM0, qword ptr [RSP] ; movsd (%rsp), %xmm0
                    69 movsd XMM1, qword ptr [RSP + 8]; movsd 8(%rsp), %xmm1
                    71 movsd XMM2, qword ptr [RSP + 16] ; movsd 16(%rsp), %xmm2
                    73 movsd XMM3, qword ptr [RSP + 24] ;movsd 24(%rsp), %xmm3
                    108 movsd qword ptr[r8], xmm0; movsd %xmm0, (%r8)
                    213 movsd qword ptr [ffi_clo_OFF_X + rsp], xmm0 ; movsd %xmm0, ffi_clo_OFF_X(%rsp)
                    214 movsd qword ptr [ffi_clo_OFF_X+8+rsp], xmm1 ; movsd %xmm1, ffi_clo_OFF_X+8(%rsp)
                    215 movsd qword ptr [ffi_clo_OFF_X+16+rsp], xmm2 ; movsd %xmm2, ffi_clo_OFF_X+16(%rsp)
                    216 movsd qword ptr [ffi_clo_OFF_X+24+rsp], xmm3 ; movsd %xmm3, ffi_clo_OFF_X+24(%rsp)
                    224 movsd xmm0, qword ptr [rsp + ffi_clo_OFF_R] ;movsd ffi_clo_OFF_R(%rsp), %xmm0
|
D | win64.S | 67 movsd (%rsp), %xmm0
              69 movsd 8(%rsp), %xmm1
              71 movsd 16(%rsp), %xmm2
              73 movsd 24(%rsp), %xmm3
              108 movsd %xmm0, (%r8)
              215 movsd %xmm0, ffi_clo_OFF_X(%rsp)
              216 movsd %xmm1, ffi_clo_OFF_X+8(%rsp)
              217 movsd %xmm2, ffi_clo_OFF_X+16(%rsp)
              218 movsd %xmm3, ffi_clo_OFF_X+24(%rsp)
              225 movsd ffi_clo_OFF_R(%rsp), %xmm0
|
/third_party/ffmpeg/libavcodec/x86/ |
D | mdct15.asm | 42 movsd xm1, [inq + 1*16 + 8 + %1] ; in[ 3].re, in[ 3].im, 0, 0
                 43 movsd xm4, [inq + 6*16 + 0 + %1] ; in[12].re, in[12].im, 0, 0
                 89 movsd [outq], xm0
                 149 movsd xmm%1, [inq + r4q*8]
                 154 movsd %2, [inq + r4q*8]
|
D | aacpsdsp.asm | 115 movsd [lq+nq], m2
                   163 movsd [lq+nq], m2
                   283 %define MOVH movsd
                   437 %define MOVH movsd
                   460 movsd m2, [inq+6*8]
|
D | sbrdsp.asm | 458 %define MOVH movsd
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | README-FPStack.txt | 75 movsd 24(%esp), %xmm0
                         76 movsd %xmm0, 8(%esp)
|
D | README.txt | 497 movsd 24(%esp), %xmm1
                 789 movsd 24(%esp), %xmm0
                 790 movsd %xmm0, 8(%esp)
                 885 movsd (%esp), %xmm0
                 887 movsd %xmm0, (%esp)
                 1016 movsd 176(%esp), %xmm2
                 1034 movsd 152(%esp), %xmm1
                 1036 movsd %xmm1, 152(%esp)
                 1041 movsd 152(%esp), %xmm0
                 1044 movsd %xmm0, 152(%esp)
|
D | README-SSE.txt | 390 movsd 16(%esp), %xmm0
                     391 movsd %xmm0, (%esp)
                     716 Consider using movlps instead of movsd to implement (scalar_to_vector (loadf64))
                     717 when code size is critical. movlps is slower than movsd on core2 but it's one
                     749 movsd %xmm0, (%esp)
|
/third_party/python/Modules/_ctypes/libffi_osx/x86/ |
D | darwin64.S | 152 movsd %xmm0, (%rdi)
                 280 movsd -24(%rsp), %xmm0
|
/third_party/node/deps/v8/src/wasm/baseline/ia32/ |
D | liftoff-assembler-ia32.h | 89 assm->movsd(dst.fp(), src); in Load()
                               117 assm->movsd(dst, src.fp()); in Store()
                               151 assm->movsd(Operand(esp, 0), reg.fp());
                               505 movsd(dst.fp(), src_op); in Load()
                               573 movsd(dst_op, src.fp()); in Store()
                               595 movsd(liftoff::kScratchDoubleReg, src_op); in AtomicLoad()
                               613 movsd(dst_op, liftoff::kScratchDoubleReg); in AtomicStore()
                               1194 movsd(dst, src); in Move()
                               1219 movsd(dst, reg.fp()); in Spill()
                               2078 if (dst != lhs) movsd(dst, lhs); in emit_f64_add()
                               [all …]
|
/third_party/node/deps/v8/src/codegen/ia32/ |
D | macro-assembler-ia32.cc | 1037 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); in CallRecordWriteStub()
                              1080 movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); in CallRecordWriteStub()
                              1594 movsd(dst, Operand(esp, 0)); in CallRecordWriteStub()
                              1611 movsd(Operand(esp, 0), src); in CallRecordWriteStub()
                              1624 movsd(Operand(esp, 0), dst); in CallRecordWriteStub()
                              1633 movsd(dst, Operand(esp, 0)); in CallRecordWriteStub()
|
D | assembler-ia32.h | 992 void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); } in movsd() function
                       993 void movsd(XMMRegister dst, Operand src);
                       994 void movsd(Operand dst, XMMRegister src);
|
/third_party/elfutils/tests/ |
D | testfile44.expect.bz2 |
|
D | testfile45.expect.bz2 | 1 testfile45.o: elf64-elf_x86_64
                            2
                            3 Disassembly of section .text:
                            4
                            5    0 ...
|
/third_party/node/deps/v8/src/builtins/ia32/ |
D | builtins-ia32.cc | 3848 __ movsd(xmm0, Operand(src, 0)); in Generate_MemMove() local
                       3849 __ movsd(xmm1, Operand(src, count, times_1, -8)); in Generate_MemMove() local
                       3850 __ movsd(Operand(dst, 0), xmm0); in Generate_MemMove() local
                       3851 __ movsd(Operand(dst, count, times_1, -8), xmm1); in Generate_MemMove() local
                       3992 __ movsd(Operand(esp, offset), xmm_reg); in Generate_DeoptimizationEntry() local
                       4049 __ movsd(xmm0, Operand(esp, src_offset)); in Generate_DeoptimizationEntry() local
                       4050 __ movsd(Operand(esi, dst_offset), xmm0); in Generate_DeoptimizationEntry() local
                       4128 __ movsd(xmm_reg, Operand(esi, src_offset)); in Generate_DeoptimizationEntry() local
|
/third_party/ffmpeg/libavutil/x86/ |
D | lls.asm | 117 movsd m0, [varq + iq*8]
|
D | x86util.asm | 873 movsd %1, %2
|
D | x86inc.asm | 1408 AVX_INSTR movsd, sse2, 1, 0, 0
|
/third_party/ffmpeg/libavfilter/x86/ |
D | vf_ssim.asm | 257 movsd r0m, m0
|
/third_party/elfutils/libcpu/ |
D | i386.mnemonics | 280 MNE(movsd)
|
D | x86_64.mnemonics | 268 MNE(movsd)
|
/third_party/node/deps/v8/src/wasm/baseline/x64/ |
D | liftoff-assembler-x64.h | 1675 if (dst != lhs) movsd(dst, lhs); in emit_f64_add()
                              1686 movsd(kScratchDoubleReg, rhs); in emit_f64_sub()
                              1687 movsd(dst, lhs); in emit_f64_sub()
                              1690 if (dst != lhs) movsd(dst, lhs); in emit_f64_sub()
                              1703 if (dst != lhs) movsd(dst, lhs); in emit_f64_mul()
                              1714 movsd(kScratchDoubleReg, rhs); in emit_f64_div()
                              1715 movsd(dst, lhs); in emit_f64_div()
                              1718 if (dst != lhs) movsd(dst, lhs); in emit_f64_div()
|
/third_party/node/deps/v8/src/compiler/backend/ia32/ |
D | code-generator-ia32.cc | 401 __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
                             402 __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
                             408 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
                             416 __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
                             422 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
|
/third_party/gstreamer/gstplugins_good/gst/deinterlace/x86/ |
D | x86inc.asm | 1394 AVX_INSTR movsd, sse2, 1, 0, 0
|
/third_party/skia/third_party/externals/libjpeg-turbo/simd/x86_64/ |
D | jchuff-sse2.asm | 334 movsd xmm1, xmm2 ;B: w1 = 24 26 25 27 11 04 05 12
|