/external/llvm-project/llvm/test/CodeGen/X86/ |
D | split-vector-rem.ll |
    9 ; CHECK-NEXT: movd %xmm0, %eax
    11 ; CHECK-NEXT: movd %xmm0, %ecx
    14 ; CHECK-NEXT: movd %edx, %xmm0
    16 ; CHECK-NEXT: movd %xmm5, %eax
    18 ; CHECK-NEXT: movd %xmm5, %ecx
    21 ; CHECK-NEXT: movd %edx, %xmm5
    23 ; CHECK-NEXT: movd %xmm4, %eax
    24 ; CHECK-NEXT: movd %xmm2, %ecx
    27 ; CHECK-NEXT: movd %edx, %xmm0
    29 ; CHECK-NEXT: movd %xmm4, %eax
    [all …]
|
D | vector-rem.ll |
    8 ; CHECK-NEXT: movd %xmm2, %eax
    10 ; CHECK-NEXT: movd %xmm2, %ecx
    13 ; CHECK-NEXT: movd %edx, %xmm2
    15 ; CHECK-NEXT: movd %xmm3, %eax
    17 ; CHECK-NEXT: movd %xmm3, %ecx
    20 ; CHECK-NEXT: movd %edx, %xmm3
    22 ; CHECK-NEXT: movd %xmm0, %eax
    23 ; CHECK-NEXT: movd %xmm1, %ecx
    26 ; CHECK-NEXT: movd %edx, %xmm2
    28 ; CHECK-NEXT: movd %xmm0, %eax
    [all …]
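The vector-rem tests above exercise remainder on integer vectors. SSE2 has no packed integer divide, so the backend extracts each lane with movd, runs a scalar idivl, and moves the remainder (left in %edx) back into an xmm register before repacking. A minimal C sketch of the operation being lowered; the function name and 4-lane width are illustrative, not taken from the test:

/* Hedged sketch: element-wise remainder on a 4 x i32 vector.  Each a[i] % b[i]
 * becomes cltd + idivl on x86, with movd shuttling lanes between xmm and GPRs. */
void rem_v4i32(int dst[4], const int a[4], const int b[4]) {
    for (int i = 0; i < 4; ++i)
        dst[i] = a[i] % b[i];   /* idivl leaves the remainder in %edx */
}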
|
D | vec_set.ll |
    9 ; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    10 ; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
    12 ; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    13 ; X86-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
    16 ; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    17 ; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
    19 ; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    20 ; X86-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
    29 ; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    30 ; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
    [all …]
|
D | pr44976.ll |
    7 ; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    8 ; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
    10 ; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    11 ; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
    14 ; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    15 ; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
    17 ; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    18 ; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
    22 ; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    23 ; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
    [all …]
|
D | mmx-build-vector.ll |
    21 ; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
    22 ; X86-NEXT: movd {{[0-9]+}}(%esp), %mm1
    30 ; X64-NEXT: movd %edx, %mm0
    31 ; X64-NEXT: movd %esi, %mm1
    48 ; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
    55 ; X64-NEXT: movd %esi, %mm0
    71 ; X86-MMX-NEXT: movd {{[0-9]+}}(%esp), %mm0
    80 ; X86-SSE-NEXT: movd {{[0-9]+}}(%esp), %mm0
    88 ; X64-NEXT: movd %edx, %mm0
    105 ; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
    [all …]
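The mmx-build-vector checks show how scalars are assembled into an MMX register: on 32-bit x86 the halves come off the stack, on x86-64 straight from GPRs via movd, followed by an unpack. A hedged intrinsics sketch of the same idea (the function name is illustrative):

#include <mmintrin.h>

/* Hedged sketch: build a 2 x i32 MMX value from two scalars.
 * _mm_cvtsi32_si64 compiles to a movd r32 -> mm transfer; the unpack is punpckldq. */
static __m64 build_v2i32(int lo, int hi) {
    __m64 l = _mm_cvtsi32_si64(lo);
    __m64 h = _mm_cvtsi32_si64(hi);
    return _mm_unpacklo_pi32(l, h);  /* caller should _mm_empty() before touching x87 */
}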
|
D | haddsub-2.ll |
    126 ; SSE3-NEXT: movd %xmm0, %eax
    128 ; SSE3-NEXT: movd %xmm2, %ecx
    131 ; SSE3-NEXT: movd %xmm2, %eax
    133 ; SSE3-NEXT: movd %xmm0, %edx
    135 ; SSE3-NEXT: movd %xmm1, %eax
    137 ; SSE3-NEXT: movd %xmm0, %esi
    140 ; SSE3-NEXT: movd %xmm0, %eax
    142 ; SSE3-NEXT: movd %xmm0, %edi
    144 ; SSE3-NEXT: movd %edi, %xmm0
    145 ; SSE3-NEXT: movd %esi, %xmm1
    [all …]
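Without SSSE3's phaddd, the SSE3 code paths in haddsub-2.ll realize a horizontal integer add by moving lanes to GPRs with movd, adding there, and rebuilding the vector. The semantics being tested are roughly these (a hedged sketch; names are illustrative):

/* Hedged sketch of a packed horizontal add of two 4 x i32 vectors:
 * out = { a0+a1, a2+a3, b0+b1, b2+b3 } -- the pattern phaddd implements
 * directly and pre-SSSE3 targets emulate through GPRs. */
void hadd_v4i32(int out[4], const int a[4], const int b[4]) {
    out[0] = a[0] + a[1];
    out[1] = a[2] + a[3];
    out[2] = b[0] + b[1];
    out[3] = b[2] + b[3];
}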
|
D | build-vector-128.ll |
    111 ; SSE2-64-NEXT: movd %ecx, %xmm0
    112 ; SSE2-64-NEXT: movd %edx, %xmm1
    114 ; SSE2-64-NEXT: movd %esi, %xmm2
    115 ; SSE2-64-NEXT: movd %edi, %xmm0
    122 ; SSE41-64-NEXT: movd %edi, %xmm0
    150 ; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    151 ; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
    153 ; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    154 ; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
    157 ; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
    [all …]
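vec_set.ll, pr44976.ll and build-vector-128.ll all check the SSE2 build_vector lowering: each scalar (GPR on x86-64, 4-byte load on 32-bit x86) is placed in its own xmm register with movd and the registers are then interleaved. A hedged intrinsics sketch of that pattern (the function name is illustrative):

#include <emmintrin.h>

/* Hedged sketch: build a 4 x i32 xmm value the SSE2-only way --
 * movd each scalar into an xmm register, then punpckldq / punpcklqdq. */
static __m128i build_v4i32(int a, int b, int c, int d) {
    __m128i x0 = _mm_cvtsi32_si128(a);        /* movd */
    __m128i x1 = _mm_cvtsi32_si128(b);
    __m128i x2 = _mm_cvtsi32_si128(c);
    __m128i x3 = _mm_cvtsi32_si128(d);
    __m128i lo = _mm_unpacklo_epi32(x0, x1);  /* a b . . */
    __m128i hi = _mm_unpacklo_epi32(x2, x3);  /* c d . . */
    return _mm_unpacklo_epi64(lo, hi);        /* a b c d */
}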
|
D | div-rem-pair-recomposition-signed.ll |
    187 ; X86-NEXT: movd %eax, %xmm2
    191 ; X86-NEXT: movd %eax, %xmm3
    196 ; X86-NEXT: movd %eax, %xmm4
    200 ; X86-NEXT: movd %eax, %xmm2
    206 ; X86-NEXT: movd %eax, %xmm3
    210 ; X86-NEXT: movd %eax, %xmm4
    215 ; X86-NEXT: movd %eax, %xmm5
    219 ; X86-NEXT: movd %eax, %xmm3
    224 ; X86-NEXT: movd %eax, %xmm5
    228 ; X86-NEXT: movd %eax, %xmm6
    [all …]
|
D | div-rem-pair-recomposition-unsigned.ll |
    187 ; X86-NEXT: movd %eax, %xmm2
    191 ; X86-NEXT: movd %eax, %xmm3
    196 ; X86-NEXT: movd %eax, %xmm4
    200 ; X86-NEXT: movd %eax, %xmm2
    206 ; X86-NEXT: movd %eax, %xmm3
    210 ; X86-NEXT: movd %eax, %xmm4
    215 ; X86-NEXT: movd %eax, %xmm5
    219 ; X86-NEXT: movd %eax, %xmm3
    224 ; X86-NEXT: movd %eax, %xmm5
    228 ; X86-NEXT: movd %eax, %xmm6
    [all …]
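The two div-rem-pair-recomposition tests (signed and unsigned) cover the optimization where a division and a remainder of the same operands share a single divide: the remainder is recomposed as x - (x / y) * y rather than computed separately. A hedged scalar sketch of that identity (the 64-bit width here is for brevity, not taken from the tests):

#include <stdint.h>

/* Hedged sketch: when both q = x/y and r = x%y are needed, the remainder can
 * be recomposed from the quotient, so only one division (or divide libcall)
 * is issued. */
static void divrem_u64(uint64_t x, uint64_t y, uint64_t *q, uint64_t *r) {
    uint64_t quot = x / y;
    *q = quot;
    *r = x - quot * y;   /* equal to x % y for y != 0 */
}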
|
/external/llvm/test/CodeGen/X86/ |
D | vector-rem.ll |
    8 ; CHECK-NEXT: movd %xmm2, %eax
    10 ; CHECK-NEXT: movd %xmm2, %ecx
    13 ; CHECK-NEXT: movd %edx, %xmm2
    15 ; CHECK-NEXT: movd %xmm3, %eax
    17 ; CHECK-NEXT: movd %xmm3, %ecx
    20 ; CHECK-NEXT: movd %edx, %xmm3
    22 ; CHECK-NEXT: movd %xmm0, %eax
    23 ; CHECK-NEXT: movd %xmm1, %ecx
    26 ; CHECK-NEXT: movd %edx, %xmm2
    28 ; CHECK-NEXT: movd %xmm0, %eax
    [all …]
|
D | vector-lzcnt-128.ll |
    17 ; SSE2-NEXT: movd %xmm0, %rax
    22 ; SSE2-NEXT: movd %rax, %xmm1
    24 ; SSE2-NEXT: movd %xmm0, %rax
    28 ; SSE2-NEXT: movd %rax, %xmm0
    35 ; SSE3-NEXT: movd %xmm0, %rax
    40 ; SSE3-NEXT: movd %rax, %xmm1
    42 ; SSE3-NEXT: movd %xmm0, %rax
    46 ; SSE3-NEXT: movd %rax, %xmm0
    53 ; SSSE3-NEXT: movd %xmm0, %rax
    58 ; SSSE3-NEXT: movd %rax, %xmm1
    [all …]
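vector-lzcnt-128.ll covers ctlz on 128-bit vectors; on these SSE targets each 64-bit lane is moved to a GPR with movd/movq, counted there, and moved back. A hedged sketch of the per-lane operation, assuming the variant that is defined at zero and returns 64:

#include <stdint.h>

/* Hedged sketch: count leading zeros of each lane of a 2 x i64 vector.
 * __builtin_clzll is undefined at 0, so the zero lane is handled explicitly. */
static void ctlz_v2i64(uint64_t out[2], const uint64_t in[2]) {
    for (int i = 0; i < 2; ++i)
        out[i] = in[i] ? (uint64_t)__builtin_clzll(in[i]) : 64u;
}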
|
D | haddsub-2.ll |
    126 ; SSE3-NEXT: movd %xmm0, %eax
    128 ; SSE3-NEXT: movd %xmm2, %ecx
    131 ; SSE3-NEXT: movd %xmm2, %eax
    133 ; SSE3-NEXT: movd %xmm0, %edx
    135 ; SSE3-NEXT: movd %xmm1, %eax
    137 ; SSE3-NEXT: movd %xmm0, %esi
    140 ; SSE3-NEXT: movd %xmm0, %eax
    142 ; SSE3-NEXT: movd %xmm0, %edi
    144 ; SSE3-NEXT: movd %edi, %xmm0
    145 ; SSE3-NEXT: movd %edx, %xmm1
    [all …]
|
D | bitcast-mmx.ll |
    6 ; CHECK: movd %[[REG1:[a-z]+]], %mm0
    8 ; CHECK-NEXT: movd %mm0, %eax
    25 ; CHECK: movd %[[REG2:[a-z]+]], %mm0
    26 ; CHECK-NEXT: movd %[[REG1]], %mm1
    28 ; CHECK-NEXT: movd %mm1, %rax
    40 ; CHECK: movd %[[REG4:[a-z]+]], %mm0
    41 ; CHECK-NEXT: movd %[[REG6:[a-z0-9]+]], %mm1
    43 ; CHECK-NEXT: movd %[[REG1]], %mm0
    45 ; CHECK-NEXT: movd %mm0, %rax
    63 ; CHECK-NEXT: movd %mm0, %rax
|
/external/boringssl/src/crypto/fipsmodule/bn/asm/ |
D | bn-586.pl |
    53 &movd("mm0",&wparam(3)); # mm0 = w
    58 &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0]
    60 &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0]
    62 &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1]
    64 &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2]
    66 &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3]
    69 &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1]
    71 &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2]
    73 &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3]
    75 &movd(&DWP(0,$r,"",0),"mm1");
    [all …]
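This bn-586.pl fragment is the MMX path of a word-level multiply-accumulate: the comments show w broadcast into mm0 and r[i]/a[i] brought in with movd, while 64-bit partial products carry from word to word in the mm registers. A hedged C sketch of the arithmetic it implements; the prototype mirrors the usual bn_mul_add_words contract but is written from the comments above, not copied from the library:

#include <stdint.h>

/* Hedged sketch: r[0..n-1] += a[0..n-1] * w, returning the final carry word.
 * In the MMX code, pmuludq/paddq hold the 64-bit partial sum and movd moves
 * the 32-bit words between memory and the mm registers. */
static uint32_t mul_add_words(uint32_t *r, const uint32_t *a, int n, uint32_t w) {
    uint64_t carry = 0;
    for (int i = 0; i < n; ++i) {
        uint64_t t = (uint64_t)a[i] * w + r[i] + carry;
        r[i]  = (uint32_t)t;  /* low 32 bits go back to memory */
        carry = t >> 32;      /* high 32 bits ride along in the register */
    }
    return (uint32_t)carry;
}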
|
/external/rust/crates/ring/pregenerated/ |
D | x86-mont-elf.S |
    71 movd %eax,%mm7
    77 movd (%edi),%mm4
    78 movd (%esi),%mm5
    79 movd (%ebp),%mm3
    87 movd 4(%ebp),%mm1
    88 movd 4(%esi),%mm0
    100 movd 4(%ebp,%ecx,4),%mm1
    102 movd 4(%esi,%ecx,4),%mm0
    104 movd %mm3,28(%esp,%ecx,4)
    116 movd %mm3,28(%esp,%ecx,4)
    [all …]
|
D | x86-mont-macosx.S |
    70 movd %eax,%mm7
    76 movd (%edi),%mm4
    77 movd (%esi),%mm5
    78 movd (%ebp),%mm3
    86 movd 4(%ebp),%mm1
    87 movd 4(%esi),%mm0
    99 movd 4(%ebp,%ecx,4),%mm1
    101 movd 4(%esi,%ecx,4),%mm0
    103 movd %mm3,28(%esp,%ecx,4)
    115 movd %mm3,28(%esp,%ecx,4)
    [all …]
|
/external/rust/crates/ring/crypto/fipsmodule/bn/asm/ |
D | x86-mont.pl |
    153 &movd ($mask,"eax"); # mask 32 lower bits
    162 &movd ($mul0,&DWP(0,$bp)); # bp[0]
    163 &movd ($mul1,&DWP(0,$ap)); # ap[0]
    164 &movd ($car1,&DWP(0,$np)); # np[0]
    176 &movd ($acc1,&DWP(4,$np)); # np[1]
    177 &movd ($acc0,&DWP(4,$ap)); # ap[1]
    191 &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
    193 &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1]
    195 &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[j-1]=
    210 &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]=
    [all …]
|
/external/rust/crates/ring/pregenerated/tmp/ |
D | x86-mont-win32n.asm |
    81 movd mm7,eax
    87 movd mm4,DWORD [edi]
    88 movd mm5,DWORD [esi]
    89 movd mm3,DWORD [ebp]
    97 movd mm1,DWORD [4+ebp]
    98 movd mm0,DWORD [4+esi]
    110 movd mm1,DWORD [4+ecx*4+ebp]
    112 movd mm0,DWORD [4+ecx*4+esi]
    114 movd DWORD [28+ecx*4+esp],mm3
    126 movd DWORD [28+ecx*4+esp],mm3
    [all …]
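The x86-mont listings above (the perlasm source and its pregenerated ELF, Mach-O, and win32 NASM outputs) are the same Montgomery multiplication kernel: movd feeds 32-bit words of ap, bp, and np into MMX registers, double-width sums stay in mm registers, and the running temporary tp[] lives on the stack (the movd stores to 28(%esp,%ecx,4)). Below is a hedged, word-level C sketch of the CIOS Montgomery multiplication these routines implement; it follows the textbook algorithm rather than transcribing the assembly, and n0 is the usual -n[0]^-1 mod 2^32 parameter:

#include <stdint.h>

/* Hedged sketch of word-level (CIOS) Montgomery multiplication:
 * rp = ap * bp * R^-1 mod np, with R = 2^(32*num).
 * tp[] must provide num+2 zero-initialized scratch words. */
static void mont_mul(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                     const uint32_t *np, uint32_t n0, int num, uint32_t *tp) {
    for (int i = 0; i < num; ++i) {
        /* tp += ap * bp[i] */
        uint64_t carry = 0;
        for (int j = 0; j < num; ++j) {
            uint64_t t = (uint64_t)ap[j] * bp[i] + tp[j] + carry;
            tp[j] = (uint32_t)t;
            carry = t >> 32;
        }
        uint64_t top = (uint64_t)tp[num] + carry;
        tp[num]     = (uint32_t)top;
        tp[num + 1] = (uint32_t)(top >> 32);

        /* tp += np * m, with m chosen so the low word cancels, then shift down */
        uint32_t m = tp[0] * n0;
        carry = ((uint64_t)np[0] * m + tp[0]) >> 32;
        for (int j = 1; j < num; ++j) {
            uint64_t t = (uint64_t)np[j] * m + tp[j] + carry;
            tp[j - 1] = (uint32_t)t;
            carry = t >> 32;
        }
        top = (uint64_t)tp[num] + carry;
        tp[num - 1] = (uint32_t)top;
        tp[num]     = tp[num + 1] + (uint32_t)(top >> 32);
    }
    /* conditional final subtraction: the accumulated value is in [0, 2*np) */
    uint64_t borrow = 0;
    for (int j = 0; j < num; ++j) {
        uint64_t d = (uint64_t)tp[j] - np[j] - borrow;
        rp[j]  = (uint32_t)d;
        borrow = (d >> 32) & 1;
    }
    if (tp[num] < borrow)                    /* tp < np: keep the unsubtracted value */
        for (int j = 0; j < num; ++j) rp[j] = tp[j];
}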
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | sad_sse3.asm |
    196 movd [rcx], xmm0
    202 movd [rcx+4], xmm0
    208 movd [rcx+8], xmm0
    234 movd [rcx], xmm0
    240 movd [rcx+4], xmm0
    246 movd [rcx+8], xmm0
    275 movd [rcx+8], mm7
    300 movd [rcx+8], mm7
    315 movd mm0, DWORD PTR [src_ptr]
    316 movd mm1, DWORD PTR [ref_ptr]
    [all …]
|
D | sad4d_sse2.asm |
    17 movd m0, [srcq +%2]
    19 movd m6, [ref1q+%3]
    20 movd m4, [ref2q+%3]
    21 movd m7, [ref3q+%3]
    22 movd m5, [ref4q+%3]
    23 movd m1, [srcq +%4]
    24 movd m2, [ref1q+%5]
    27 movd m1, [ref2q+%5]
    28 movd m2, [ref3q+%5]
    29 movd m3, [ref4q+%5]
    [all …]
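The two SAD files use movd for narrow 4-byte row loads (sad4d_sse2.asm, which compares one source row against four reference blocks at once) and for storing the accumulated sums (sad_sse3.asm). A hedged C reference of what a 4x4 "x4d" sum-of-absolute-differences kernel computes; the prototype is illustrative, not the library's signature:

#include <stdint.h>
#include <stdlib.h>

/* Hedged sketch: SAD of one 4x4 source block against four 4x4 reference
 * blocks, one accumulated sum per reference (the job psadbw does in SIMD). */
static void sad4x4x4d(const uint8_t *src, int src_stride,
                      const uint8_t *const ref[4], int ref_stride,
                      uint32_t sad[4]) {
    for (int r = 0; r < 4; ++r) {
        sad[r] = 0;
        for (int y = 0; y < 4; ++y)
            for (int x = 0; x < 4; ++x)
                sad[r] += abs(src[y * src_stride + x] - ref[r][y * ref_stride + x]);
    }
}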
|
D | intrapred_sse2.asm |
    55 movd [dstq ], m3
    57 movd [dstq+strideq ], m3
    60 movd [dstq ], m3
    62 movd [dstq+strideq ], m3
    64 movd tempd, m0
    113 movd m0, [leftq] ; abcd [byte]
    125 movd [dstq ], m1
    127 movd [dstq+strideq], m1
    131 movd [dstq ], m1
    132 movd [dstq+strideq], m4 ; d, d, d, d
    [all …]
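intrapred_sse2.asm uses movd both to load the four left-neighbor bytes ("abcd") and to store each predicted 4-byte row; the "; d, d, d, d" comment marks a row filled from a single left pixel, i.e. horizontal prediction. A hedged C sketch of 4x4 horizontal intra prediction (names are illustrative):

#include <stdint.h>

/* Hedged sketch: 4x4 horizontal (H) intra prediction -- every pixel of row y
 * copies the left neighbor left[y]; the SSE2 code splats the byte and writes
 * each row with a 4-byte movd store. */
static void h_pred_4x4(uint8_t *dst, int stride, const uint8_t left[4]) {
    for (int y = 0; y < 4; ++y)
        for (int x = 0; x < 4; ++x)
            dst[y * stride + x] = left[y];
}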
|
/external/llvm-project/llvm/test/MC/X86/ |
D | x86_64-encoding.s |
    87 movd %r8, %mm1
    91 movd %r8d, %mm1
    95 movd %rdx, %mm1
    99 movd %edx, %mm1
    103 movd %mm1, %r8
    107 movd %mm1, %r8d
    111 movd %mm1, %rdx
    115 movd %mm1, %edx
    119 movd %mm1, (%rax)
    123 movd (%rax), %mm1
|
/external/capstone/suite/MC/X86/ |
D | x86_64-encoding.s.cs |
    21 0x49,0x0f,0x6e,0xc8 = movd %r8, %mm1
    22 0x41,0x0f,0x6e,0xc8 = movd %r8d, %mm1
    23 0x48,0x0f,0x6e,0xca = movd %rdx, %mm1
    24 0x0f,0x6e,0xca = movd %edx, %mm1
    25 0x49,0x0f,0x7e,0xc8 = movd %mm1, %r8
    26 0x41,0x0f,0x7e,0xc8 = movd %mm1, %r8d
    27 0x48,0x0f,0x7e,0xca = movd %mm1, %rdx
    28 0x0f,0x7e,0xca = movd %mm1, %edx
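These byte/text pairs record the MMX movd encodings: 0x0f 0x6e / 0x0f 0x7e is the GPR-to-mm / mm-to-GPR move, the REX.W prefix (0x48) widens it to 64 bits, and REX.B or REX.W+B (0x41 / 0x49) reaches r8d / r8. A hedged sketch of feeding one of these byte strings through Capstone's C API to reproduce the text on the right; the exact mnemonic printed depends on the Capstone version and on enabling the AT&T syntax option:

#include <stdio.h>
#include <stdint.h>
#include <capstone/capstone.h>

int main(void) {
    /* 0x49,0x0f,0x6e,0xc8 = movd %r8, %mm1 (first pair in the listing above) */
    const uint8_t code[] = { 0x49, 0x0f, 0x6e, 0xc8 };
    csh handle;
    cs_insn *insn;

    if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK)
        return 1;
    cs_option(handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);  /* match the .s.cs text */

    size_t count = cs_disasm(handle, code, sizeof(code), 0, 0, &insn);
    if (count > 0) {
        for (size_t i = 0; i < count; ++i)
            printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);
        cs_free(insn, count);
    }
    cs_close(&handle);
    return 0;
}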
|
/external/llvm/test/MC/X86/ |
D | x86_64-encoding.s |
    87 movd %r8, %mm1
    91 movd %r8d, %mm1
    95 movd %rdx, %mm1
    99 movd %edx, %mm1
    103 movd %mm1, %r8
    107 movd %mm1, %r8d
    111 movd %mm1, %rdx
    115 movd %mm1, %edx
|
/external/libvpx/libvpx/vp8/common/x86/ |
D | idctllm_mmx.asm |
    187 movd mm4, [rsi]
    191 movd [rdx], mm0
    193 movd mm4, [rsi+rax]
    197 movd [rdx+rdi], mm1
    199 movd mm4, [rsi+2*rax]
    203 movd [rdx+rdi*2], mm2
    208 movd mm4, [rsi+2*rax]
    212 movd [rdx+rdi*2], mm5
    236 movd mm5, arg(0) ;input_dc
    251 movd mm1, [rax]
    [all …]
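idctllm_mmx.asm uses movd for the 4-byte row loads and stores of the prediction and reconstruction buffers, and line 236 loads a lone DC coefficient (input_dc) for the DC-only path. A hedged C sketch of a DC-only inverse-transform-and-add step, assuming the (dc + 4) >> 3 rounding of the VP8 4x4 transform; the prototype is illustrative:

#include <stdint.h>

/* Hedged sketch: DC-only inverse transform + add for a 4x4 block.  The MMX
 * code reads each predictor row and writes each output row with a 4-byte movd. */
static uint8_t clamp255(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

static void dc_only_idct_add(int16_t input_dc,
                             const uint8_t *pred, int pred_stride,
                             uint8_t *dst, int dst_stride) {
    int a1 = (input_dc + 4) >> 3;   /* assumed rounding of the 4x4 inverse DCT */
    for (int y = 0; y < 4; ++y)
        for (int x = 0; x < 4; ++x)
            dst[y * dst_stride + x] = clamp255(pred[y * pred_stride + x] + a1);
}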
|