/external/valgrind/main/none/tests/amd64/ |
D | pcmpxstrx64.stdout.exp |
    3    istri $0x4A: xmm0 55555555555555555555555555555555 rcx 5555555555550006 flags 00000881
    4    istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
    5    istrm $0x4A: xmm0 000000000000000000ffffffffffffff rcx 5555555555555555 flags 00000881
    6    istrm $0x0A: xmm0 0000000000000000000000000000007f rcx 5555555555555555 flags 00000881
    7    estri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000008c1
    8    estri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
    9    estrm $0x4A: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
    10   estrm $0x0A: xmm0 0000000000000000000000000000ffff rcx 5555555555555555 flags 000008c1
    13   istri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000000c1
    14   istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
    [all …]
|
D | pcmpxstrx64w.stdout.exp |
    3    istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550002 flags 00000881
    4    istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
    5    istrm $0x4B: xmm0 00000000000000000000ffffffffffff rcx 5555555555555555 flags 00000881
    6    istrm $0x0B: xmm0 00000000000000000000000000000007 rcx 5555555555555555 flags 00000881
    7    estri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000008c1
    8    estri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
    9    estrm $0x4B: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
    10   estrm $0x0B: xmm0 000000000000000000000000000000ff rcx 5555555555555555 flags 000008c1
    13   istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
    14   istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550003 flags 000000c1
    [all …]
|
D | cmpxchg.c |
    13   ULong rcx; variable
    25   rcx = 0x33333333; rbx = 0x44444444; in main()
    28   rax&0xff,rbx&0xff,rcx&0xff); in main()
    39   "\tmov " VG_SYM(rcx) ",%rcx\n" in main()
    44   "\tmov " VG_SYM(rcx) "(%rip),%rcx\n" in main()
    69   rcx = 0x55555555; rbx = 0x55555555; in main()
    72   rax&0xff,rbx&0xff,rcx&0xff); in main()
    83   "\tmov " VG_SYM(rcx) ",%rcx\n" in main()
    88   "\tmov " VG_SYM(rcx) "(%rip),%rcx\n" in main()
    113  rcx = 0x33333333; rbx = 0x44444444; in main()
    [all …]
|
D | asorep.c |
    10   unsigned long rdi, rsi, rcx, rax; in main() local
    20   : "=D" (rdi), "=S" (rsi), "=c" (rcx) in main()
    26   || rcx) in main()
    31   : "=D" (rdi), "=c" (rcx), "+a" (rax) in main()
    36   || rcx in main()
    50   : "=D" (rdi), "=S" (rsi), "=c" (rcx) in main()
    54   || rcx != 17ULL) in main()
    60   : "=D" (rdi), "=c" (rcx), "+a" (rax) in main()
    63   || rcx != 23ULL in main()
    69   : "=D" (rdi), "=c" (rcx), "+a" (rax) in main()
    [all …]
|
/external/llvm/test/MC/X86/ |
D | x86_64-fma4-encoding.s |
    6    vfmaddss (%rcx), %xmm1, %xmm0, %xmm0
    10   vfmaddss %xmm1, (%rcx),%xmm0, %xmm0
    18   vfmaddsd (%rcx), %xmm1, %xmm0, %xmm0
    22   vfmaddsd %xmm1, (%rcx),%xmm0, %xmm0
    34   vfmaddps (%rcx), %xmm1, %xmm0, %xmm0
    38   vfmaddps %xmm1, (%rcx),%xmm0, %xmm0
    46   vfmaddpd (%rcx), %xmm1, %xmm0, %xmm0
    50   vfmaddpd %xmm1, (%rcx),%xmm0, %xmm0
    58   vfmaddps (%rcx), %ymm1, %ymm0, %ymm0
    62   vfmaddps %ymm1, (%rcx),%ymm0, %ymm0
    [all …]
|
D | x86_64-xop-encoding.s |
    10   vphsubwd (%rcx,%rax), %xmm1
    18   vphsubdq (%rcx,%rax), %xmm1
    34   vphaddwq (%rcx), %xmm4
    50   vphadduwq (%rcx,%rax), %xmm6
    66   vphaddudq 8(%rcx,%rax), %xmm4
    74   vphaddubw (%rcx), %xmm3
    82   vphaddubq (%rcx), %xmm4
    106  vphaddbw (%rcx,%rax), %xmm1
    114  vphaddbq (%rcx,%rax), %xmm1
    122  vphaddbd (%rcx,%rax), %xmm1
    [all …]
|
D | x86_64-avx-encoding.s |
    37   vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11
    41   vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11
    45   vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11
    49   vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11
    53   vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm11
    57   vsubsd -4(%rcx,%rbx,8), %xmm10, %xmm11
    61   vmulsd -4(%rcx,%rbx,8), %xmm10, %xmm11
    65   vdivsd -4(%rcx,%rbx,8), %xmm10, %xmm11
    101  vaddps -4(%rcx,%rbx,8), %xmm10, %xmm11
    105  vsubps -4(%rcx,%rbx,8), %xmm10, %xmm11
    [all …]
|
D | avx512-encodings.s |
    18   vaddpd (%rcx), %zmm27, %zmm8
    26   vaddpd (%rcx){1to8}, %zmm27, %zmm8
    74   vaddps (%rcx), %zmm13, %zmm18
    82   vaddps (%rcx){1to16}, %zmm13, %zmm18
    130  vdivpd (%rcx), %zmm6, %zmm18
    138  vdivpd (%rcx){1to8}, %zmm6, %zmm18
    186  vdivps (%rcx), %zmm23, %zmm23
    194  vdivps (%rcx){1to16}, %zmm23, %zmm23
    242  vmaxpd (%rcx), %zmm28, %zmm30
    250  vmaxpd (%rcx){1to8}, %zmm28, %zmm30
    [all …]
|
/external/libvpx/libvpx/vp8/common/x86/ |
D | recon_mmx.asm |
    39   movsxd rcx, dword ptr arg(3) ;dst_stride
    45   movq [rdi+rcx], mm1
    46   movq [rdi+rcx*2], mm2
    49   lea rdi, [rdi+rcx*2]
    52   add rdi, rcx
    59   movq [rdi+rcx], mm4
    61   movq [rdi+rcx*2], mm5
    62   lea rdi, [rdi+rcx*2]
    67   movq [rdi+rcx], mm0
    68   movq [rdi+rcx*2],mm1
    [all …]
|
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/x86/ |
D | recon_mmx.asm |
    39   movsxd rcx, dword ptr arg(3) ;dst_stride
    45   movq [rdi+rcx], mm1
    46   movq [rdi+rcx*2], mm2
    49   lea rdi, [rdi+rcx*2]
    52   add rdi, rcx
    59   movq [rdi+rcx], mm4
    61   movq [rdi+rcx*2], mm5
    62   lea rdi, [rdi+rcx*2]
    67   movq [rdi+rcx], mm0
    68   movq [rdi+rcx*2],mm1
    [all …]
|
/external/chromium_org/v8/test/cctest/ |
D | test-disasm-x64.cc |
    66   __ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4] in TEST()
    81   __ addq(rsi, Operand(rcx, times_4, 0)); in TEST()
    82   __ addq(rsi, Operand(rcx, times_4, 24)); in TEST()
    83   __ addq(rsi, Operand(rcx, times_4, -4)); in TEST()
    84   __ addq(rsi, Operand(rcx, times_4, -1999)); in TEST()
    86   __ addq(rdi, Operand(rbp, rcx, times_4, 0)); in TEST()
    87   __ addq(rdi, Operand(rbp, rcx, times_4, 12)); in TEST()
    88   __ addq(rdi, Operand(rbp, rcx, times_4, -8)); in TEST()
    89   __ addq(rdi, Operand(rbp, rcx, times_4, -3999)); in TEST()
    90   __ addq(Operand(rbp, rcx, times_4, 12), Immediate(12)); in TEST()
    [all …]
|
D | test-macro-assembler-x64.cc |
    78   using i::rcx;
    147  __ Move(rcx, value); in TestMoveSmi()
    149  __ cmpq(rcx, rdx); in TestMoveSmi()
    195  __ Move(rcx, Smi::FromInt(x)); in TestSmiCompare()
    196  __ movq(r8, rcx); in TestSmiCompare()
    199  __ SmiCompare(rcx, rdx); in TestSmiCompare()
    212  __ cmpq(rcx, r8); in TestSmiCompare()
    219  __ SmiCompare(rdx, rcx); in TestSmiCompare()
    229  __ cmpq(rcx, rcx); in TestSmiCompare()
    233  __ cmpq(rcx, r8); in TestSmiCompare()
    [all …]
|
/external/openssl/crypto/rc4/asm/ |
D | rc4-x86_64.S |
    17   movq %rcx,%r13
    19   xorq %rcx,%rcx
    42   movl (%rdi,%rcx,4),%edx
    43   movl %eax,(%rdi,%rcx,4)
    60   movl (%rdi,%rcx,4),%edx
    61   movl %eax,(%rdi,%rcx,4)
    68   movl (%rdi,%rcx,4),%edx
    69   movl %ebx,(%rdi,%rcx,4)
    76   movl (%rdi,%rcx,4),%edx
    77   movl %eax,(%rdi,%rcx,4)
    [all …]
|
D | rc4-md5-x86_64.S |
    17   movq %rcx,%r11
    23   xorq %rcx,%rcx
    53   movl (%rdi,%rcx,4),%edx
    55   movl %eax,(%rdi,%rcx,4)
    72   movl (%rdi,%rcx,4),%edx
    74   movl %ebx,(%rdi,%rcx,4)
    90   movl (%rdi,%rcx,4),%edx
    92   movl %eax,(%rdi,%rcx,4)
    108  movl (%rdi,%rcx,4),%edx
    110  movl %ebx,(%rdi,%rcx,4)
    [all …]
|
/external/chromium_org/third_party/boringssl/mac-x86_64/crypto/rc4/ |
D | rc4-x86_64.S |
    20   movq %rcx,%r13
    22   xorq %rcx,%rcx
    46   movl (%rdi,%rcx,4),%edx
    47   movl %eax,(%rdi,%rcx,4)
    64   movl (%rdi,%rcx,4),%edx
    65   movl %eax,(%rdi,%rcx,4)
    72   movl (%rdi,%rcx,4),%edx
    73   movl %ebx,(%rdi,%rcx,4)
    80   movl (%rdi,%rcx,4),%edx
    81   movl %eax,(%rdi,%rcx,4)
    [all …]
|
D | rc4-md5-x86_64.S |
    19   movq %rcx,%r11
    25   xorq %rcx,%rcx
    55   movl (%rdi,%rcx,4),%edx
    57   movl %eax,(%rdi,%rcx,4)
    74   movl (%rdi,%rcx,4),%edx
    76   movl %ebx,(%rdi,%rcx,4)
    92   movl (%rdi,%rcx,4),%edx
    94   movl %eax,(%rdi,%rcx,4)
    110  movl (%rdi,%rcx,4),%edx
    112  movl %ebx,(%rdi,%rcx,4)
    [all …]
|
/external/chromium_org/third_party/boringssl/linux-x86_64/crypto/rc4/ |
D | rc4-x86_64.S |
    20   movq %rcx,%r13
    22   xorq %rcx,%rcx
    46   movl (%rdi,%rcx,4),%edx
    47   movl %eax,(%rdi,%rcx,4)
    64   movl (%rdi,%rcx,4),%edx
    65   movl %eax,(%rdi,%rcx,4)
    72   movl (%rdi,%rcx,4),%edx
    73   movl %ebx,(%rdi,%rcx,4)
    80   movl (%rdi,%rcx,4),%edx
    81   movl %eax,(%rdi,%rcx,4)
    [all …]
|
D | rc4-md5-x86_64.S |
    19   movq %rcx,%r11
    25   xorq %rcx,%rcx
    55   movl (%rdi,%rcx,4),%edx
    57   movl %eax,(%rdi,%rcx,4)
    74   movl (%rdi,%rcx,4),%edx
    76   movl %ebx,(%rdi,%rcx,4)
    92   movl (%rdi,%rcx,4),%edx
    94   movl %eax,(%rdi,%rcx,4)
    110  movl (%rdi,%rcx,4),%edx
    112  movl %ebx,(%rdi,%rcx,4)
    [all …]
|
/external/openssl/crypto/aes/asm/ |
D | aesni-x86_64.S |
    46   movups (%rcx),%xmm0
    48   movups 16(%rcx),%xmm1
    49   leaq 32(%rcx),%rcx
    53   movups (%rcx),%xmm0
    60   movups 16(%rcx),%xmm1
    63   leaq 32(%rcx),%rcx
    65   movups (%rcx),%xmm0
    79   movups (%rcx),%xmm0
    81   movups 16(%rcx),%xmm1
    82   leaq 32(%rcx),%rcx
    [all …]
|
/external/openssl/crypto/bn/asm/ |
D | x86_64-gf2m.S |
    23   movq %rsi,%rcx
    26   shrq $2,%rcx
    30   xorq %rcx,%rdx
    78   movq (%rsp,%rdi,8),%rcx
    80   movq %rcx,%rbx
    81   shlq $4,%rcx
    85   xorq %rcx,%rax
    93   movq (%rsp,%rdi,8),%rcx
    95   movq %rcx,%rbx
    96   shlq $12,%rcx
    [all …]
|
D | modexp512-x86_64.pl |
    353  &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
    357  &MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx");
    428  lea (+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx # X (Asrc) 1024 bits, 16 qwords
    432  &MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X); # rotates @X 4 times
    438  add (+8*8)(%rcx), $X[4]
    439  adc (+8*9)(%rcx), $X[5]
    440  adc (+8*10)(%rcx), $X[6]
    441  adc (+8*11)(%rcx), $X[7]
    469  …lea (+$Reduce_Data_offset+$X2_offset+$STACK_DEPTH)(%rsp), %rcx # rcx -> pX2 ; 641 bits, 11 qwords
    481  mov $X[6], (+8*8)(%rcx)
    [all …]
|
/external/chromium_org/third_party/boringssl/linux-x86_64/crypto/aes/ |
D | aesni-x86_64.S |
    50   movups (%rcx),%xmm0
    52   movups 16(%rcx),%xmm1
    55   movups 32(%rcx),%xmm0
    56   leaq 32(%rcx,%rax,1),%rcx
    63   movups (%rcx,%rax,1),%xmm1
    67   movups -16(%rcx,%rax,1),%xmm0
    79   movups (%rcx),%xmm0
    81   movups 16(%rcx),%xmm1
    84   movups 32(%rcx),%xmm0
    85   leaq 32(%rcx,%rax,1),%rcx
    [all …]
|
/external/chromium_org/third_party/libjpeg_turbo/simd/ |
D | jcsamss2-64.asm |
    52   mov rcx, r13
    53   shl rcx,3 ; imul rcx,DCTSIZE (rcx = output_cols)
    60   push rcx
    61   shl rcx,1 ; output_cols * 2
    62   sub rcx,rdx
    73   push rcx
    81   pop rcx
    89   pop rcx ; output_cols
    106  push rcx
    113  cmp rcx, byte SIZEOF_XMMWORD
    [all …]
|
/external/chromium_org/third_party/boringssl/mac-x86_64/crypto/aes/ |
D | aesni-x86_64.S |
    50   movups (%rcx),%xmm0
    52   movups 16(%rcx),%xmm1
    55   movups 32(%rcx),%xmm0
    56   leaq 32(%rcx,%rax,1),%rcx
    63   movups (%rcx,%rax,1),%xmm1
    67   movups -16(%rcx,%rax,1),%xmm0
    79   movups (%rcx),%xmm0
    81   movups 16(%rcx),%xmm1
    84   movups 32(%rcx),%xmm0
    85   leaq 32(%rcx,%rax,1),%rcx
    [all …]
|
/external/chromium_org/third_party/boringssl/win-x86_64/crypto/rc4/ |
D | rc4-x86_64.asm |
    13   mov rdi,rcx
    16   mov rcx,r9
    31   mov r13,rcx
    33   xor rcx,rcx
    57   mov edx,DWORD PTR[rcx*4+rdi]
    58   mov DWORD PTR[rcx*4+rdi],eax
    75   mov edx,DWORD PTR[rcx*4+rdi]
    76   mov DWORD PTR[rcx*4+rdi],eax
    83   mov edx,DWORD PTR[rcx*4+rdi]
    84   mov DWORD PTR[rcx*4+rdi],ebx
    [all …]
|