/external/openssl/crypto/bn/asm/ |
D | modexp512-x86_64.S | 11 movq %rdx,%rbx 17 addq %rbx,%r9 19 movq %rdx,%rbx 25 addq %rbx,%r10 27 movq %rdx,%rbx 33 addq %rbx,%r11 35 movq %rdx,%rbx 41 addq %rbx,%r12 43 movq %rdx,%rbx 49 addq %rbx,%r13 [all …]
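The movq %rdx,%rbx / addq %rbx,%rN pairs above are the standard mulq accumulate idiom: mulq leaves the 128-bit product in %rdx:%rax, and %rbx parks the high half until it can be folded into the next accumulator limb. A minimal sketch of one such step, with an illustrative register layout rather than the file's actual MULSTEP macro:

    # one 64x64 -> 128 multiply-accumulate step (register roles are assumed)
    movq  (%rsi), %rax        # a[0]
    mulq  %rbp                # %rdx:%rax = a[0] * b
    addq  %rax, %r8           # low half into accumulator limb 0
    movq  %rdx, %rbx          # park the high half
    adcq  $0, %rbx            # absorb the carry from the low-half add
    addq  %rbx, %r9           # fold high half into limb 1 (next step would adc onward)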
|
D | x86_64-gf2m.S | 28 movq %rdi,%rbx 31 shrq $3,%rbx 33 xorq %rbx,%rdx 80 movq %rcx,%rbx 84 shrq $60,%rbx 89 xorq %rbx,%rdx 95 movq %rcx,%rbx 99 shrq $52,%rbx 104 xorq %rbx,%rdx 110 movq %rcx,%rbx [all …]
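x86_64-gf2m.S multiplies polynomials over GF(2) with a small window table: the shrq $60 / shrq $52 steps peel off chunks of the operand, and the looked-up partial products are combined with xorq. The underlying carry-less multiply can be sketched bit by bit (a naive reference loop, not the file's windowed algorithm; only the low 64 result bits are kept):

    # %rdi = a, %rsi = b; low 64 bits of the GF(2)[x] product end up in %rax
    xorq  %rax, %rax
    movl  $64, %ecx
clmul_bit:
    shrq  $1, %rsi             # next bit of b falls into CF
    jnc   clmul_skip
    xorq  %rdi, %rax           # xor in the current shifted copy of a
clmul_skip:
    addq  %rdi, %rdi           # a <<= 1; bits shifted past 63 are dropped here
    decl  %ecx
    jnz   clmul_bit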
|
D | x86_64-mont.S | 17 pushq %rbx 35 movq (%r12),%rbx 42 mulq %rbx 70 mulq %rbx 100 movq (%r12,%r14,8),%rbx 104 mulq %rbx 134 mulq %rbx 205 movq 40(%rsi),%rbx 214 pushq %rbx 233 movq (%r12),%rbx [all …]
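In bn_mul_mont-style code, %rbx typically carries the current multiplier word b[i] while mulq walks the other operand and a running carry word links the limbs. A rough sketch of that inner step, with assumed register and stack-slot roles:

    movq  (%r12,%r14,8), %rbx   # b[i]
    movq  (%rsi,%r15,8), %rax   # a[j]
    mulq  %rbx                  # %rdx:%rax = a[j] * b[i]
    addq  %r10, %rax            # add the running carry
    adcq  $0, %rdx
    addq  (%rsp,%r15,8), %rax   # add the partial result t[j]
    adcq  $0, %rdx
    movq  %rax, (%rsp,%r15,8)   # t[j] = low half
    movq  %rdx, %r10            # carry = high half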
|
D | modexp512-x86_64.pl | 353 &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx"); 357 &MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx"); 432 &MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X); # rotates @X 4 times 495 mov (+8*1)(%rsi), %rbx # B1 507 mul %rbx # B1 524 mov (+8*1)(%rsi), %rbx 561 and %rsi, %rbx 567 sbb %rbx, $X[1] 580 mov (+8*5)(%rcx), %rbx 587 and %rsi, %rbx [all …]
|
/external/valgrind/main/none/tests/amd64/ |
D | cmpxchg.c | 12 ULong rbx; variable 25 rcx = 0x33333333; rbx = 0x44444444; in main() 28 rax&0xff,rbx&0xff,rcx&0xff); in main() 38 "\tmov " VG_SYM(rbx) ",%rbx\n" in main() 43 "\tmov " VG_SYM(rbx) "(%rip),%rbx\n" in main() 69 rcx = 0x55555555; rbx = 0x55555555; in main() 72 rax&0xff,rbx&0xff,rcx&0xff); in main() 82 "\tmov " VG_SYM(rbx) ",%rbx\n" in main() 87 "\tmov " VG_SYM(rbx) "(%rip),%rbx\n" in main() 113 rcx = 0x33333333; rbx = 0x44444444; in main() [all …]
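The test pokes known values into rax/rbx/rcx and runs CMPXCHG in several widths: the accumulator (%al, %ax, ...) holds the expected value, and the destination is overwritten with the source only if it still matches. A self-contained illustration of the byte form, using the same 0x33/0x44 values (the surrounding test harness is omitted):

    movb  $0x44, %bl          # new value
    movb  $0x33, %al          # expected old value
    movb  $0x33, %cl          # destination currently equals the expected value
    cmpxchgb %bl, %cl         # %al == %cl, so %cl <- %bl and ZF is set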
|
/external/llvm/test/MC/X86/ |
D | x86_64-avx-encoding.s | 37 vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11 41 vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11 45 vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11 49 vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11 53 vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm11 57 vsubsd -4(%rcx,%rbx,8), %xmm10, %xmm11 61 vmulsd -4(%rcx,%rbx,8), %xmm10, %xmm11 65 vdivsd -4(%rcx,%rbx,8), %xmm10, %xmm11 101 vaddps -4(%rcx,%rbx,8), %xmm10, %xmm11 105 vsubps -4(%rcx,%rbx,8), %xmm10, %xmm11 [all …]
|
D | x86_64-encoding.s | 19 crc32b 4(%rbx), %eax 27 crc32w 4(%rbx), %eax 35 crc32l 4(%rbx), %eax 39 crc32l 0xdeadbeef(%rbx,%rcx,8),%ecx 63 crc32b 4(%rbx), %eax 75 crc32b 4(%rbx), %rax 79 crc32q %rbx, %rax 83 crc32q 4(%rbx), %rax 125 movq 57005(,%riz), %rbx
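The crc32 lines cover the operand-size matrix the assembler has to encode: crc32b/w/l fold 8-, 16- and 32-bit data into a 32-bit accumulator, while crc32q needs a 64-bit accumulator. A small usage sketch (addresses and values are arbitrary):

    xorl   %eax, %eax          # 32-bit CRC accumulator
    crc32b (%rbx), %eax        # fold one byte from memory
    crc32l 4(%rbx), %eax       # fold a dword
    xorq   %rax, %rax          # 64-bit accumulator for the q form
    crc32q (%rbx), %rax        # fold a qword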
|
/external/v8/test/cctest/ |
D | test-disasm-x64.cc | 72 __ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4] in TEST() 75 __ addq(rdx, rbx); in TEST() 76 __ addq(rdx, Operand(rbx, 0)); in TEST() 77 __ addq(rdx, Operand(rbx, 16)); in TEST() 78 __ addq(rdx, Operand(rbx, 1999)); in TEST() 88 __ addq(rbx, Immediate(12)); in TEST() 96 __ cmpb(rbx, Operand(rbp, rcx, times_2, 0)); in TEST() 97 __ cmpb(Operand(rbp, rcx, times_2, 0), rbx); in TEST() 123 __ bts(Operand(rbx, rcx, times_4, 0), rcx); in TEST() 130 __ push(Operand(rbx, rcx, times_4, 0)); in TEST() [all …]
|
/external/v8/src/x64/ |
D | builtins-x64.cc | 121 __ CmpObjectType(rax, MAP_TYPE, rbx); in Generate_JSConstructStubHelper() 158 rbx, in Generate_JSConstructStubHelper() 167 __ movq(Operand(rbx, JSObject::kMapOffset), rax); in Generate_JSConstructStubHelper() 169 __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx); in Generate_JSConstructStubHelper() 170 __ movq(Operand(rbx, JSObject::kElementsOffset), rcx); in Generate_JSConstructStubHelper() 175 __ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); in Generate_JSConstructStubHelper() 181 Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize)); in Generate_JSConstructStubHelper() 200 __ or_(rbx, Immediate(kHeapObjectTag)); in Generate_JSConstructStubHelper() 265 __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi); in Generate_JSConstructStubHelper() 277 __ UndoAllocationInNewSpace(rbx); in Generate_JSConstructStubHelper() [all …]
|
D | ic-x64.cc | 250 StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss, in GenerateStringLength() 265 StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss); in GenerateFunctionPrototype() 414 rbx, in GenerateGeneric() 423 __ SmiToInteger32(rbx, rax); in GenerateGeneric() 434 __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax); in GenerateGeneric() 445 GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow); in GenerateGeneric() 452 __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset)); in GenerateGeneric() 453 __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), in GenerateGeneric() 459 __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); in GenerateGeneric() 460 __ movl(rcx, rbx); in GenerateGeneric() [all …]
|
D | stub-cache-x64.cc | 397 __ LoadAddress(rbx, ref); in CompileCallLoadPropertyWithInterceptor() 471 __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset)); in GenerateFastApiCall() 472 __ movq(Operand(rsp, 3 * kPointerSize), rbx); in GenerateFastApiCall() 478 __ lea(rbx, Operand(rsp, 3 * kPointerSize)); in GenerateFastApiCall() 493 __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_. in GenerateFastApiCall() 494 __ addq(rbx, Immediate(argc * kPointerSize)); in GenerateFastApiCall() 495 __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_. in GenerateFastApiCall() 1239 CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss); in GenerateGlobalReceiverCheck() 1306 Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi, in CompileCallField() 1313 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx); in CompileCallField() [all …]
|
D | code-stubs-x64.cc | 66 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); in Generate() 84 __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex); in Generate() 87 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx); in Generate() 88 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx); in Generate() 92 __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx); in Generate() 122 rax, rbx, rcx, &gc, TAG_OBJECT); in Generate() 133 __ Set(rbx, 0); // Set to NULL. in Generate() 136 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx); in Generate() 139 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); in Generate() 140 __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx); in Generate() [all …]
|
D | codegen-x64.cc | 78 __ push(rbx); in CreateTranscendentalFunction() 80 __ movq(rbx, xmm0); in CreateTranscendentalFunction() 81 __ push(rbx); in CreateTranscendentalFunction() 86 __ pop(rbx); in CreateTranscendentalFunction() 87 __ movq(xmm0, rbx); in CreateTranscendentalFunction() 89 __ pop(rbx); in CreateTranscendentalFunction() 233 __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx); in GenerateSmiOnlyToObject() 236 rbx, in GenerateSmiOnlyToObject() 285 __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx); in GenerateSmiOnlyToDouble() 288 rbx, in GenerateSmiOnlyToDouble() [all …]
|
/external/compiler-rt/lib/tsan/rtl/ |
D | tsan_rtl_amd64.S | 36 push %rbx # non-scratch 38 .cfi_rel_offset %rbx, 0 39 mov %rsp, %rbx # save current rsp 40 .cfi_def_cfa_register %rbx 47 mov %rbx, %rsp # restore the original rsp 49 pop %rbx 71 .cfi_restore %rbx 116 push %rbx # non-scratch 118 .cfi_rel_offset %rbx, 0 119 mov %rsp, %rbx # save current rsp [all …]
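The shape here is a standard call-wrapper prologue: %rbx is callee-saved, so it is pushed with matching CFI annotations, then used to remember the incoming %rsp while the stack is realigned for the call. A reduced sketch of that pattern (the function and callee names are placeholders):

    .globl wrapper_sketch
wrapper_sketch:
    .cfi_startproc
    push  %rbx                 # non-scratch, so save it
    .cfi_adjust_cfa_offset 8
    .cfi_rel_offset %rbx, 0
    mov   %rsp, %rbx           # remember the current rsp
    .cfi_def_cfa_register %rbx
    andq  $-16, %rsp           # realign the stack for the call
    call  some_callee          # placeholder callee
    mov   %rbx, %rsp           # restore the original rsp
    .cfi_def_cfa_register %rsp
    pop   %rbx
    .cfi_adjust_cfa_offset -8
    .cfi_restore %rbx
    ret
    .cfi_endproc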
|
/external/openssl/crypto/modes/asm/ |
D | ghash-x86_64.S | 7 pushq %rbx 15 xorq %rbx,%rbx 33 xorq 8(%rsi,%rbx,1),%r8 35 xorq (%rsi,%rbx,1),%r9 75 xorq 8(%rsi,%rbx,1),%r8 77 xorq (%rsi,%rbx,1),%r9 86 movq 16(%rsp),%rbx 95 pushq %rbx 116 movq 16+8-128(%rsi),%rbx 121 shrq $4,%rbx [all …]
|
/external/llvm/test/MC/ELF/ |
D | relax-arith.s | 14 imul $foo, %rbx, %rbx 15 imul $foo, bar, %rbx 24 and $foo, %rbx 34 or $foo, %rbx 44 xor $foo, %rbx 54 add $foo, %rbx 64 sub $foo, %rbx 74 cmp $foo, %rbx
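relax-arith.s feeds these instructions a symbolic immediate ($foo), so the assembler cannot prove the value fits in a signed byte and must keep the long imm32 form instead of the short imm8 one. The size difference is easy to see with concrete immediates (an illustrative comparison, not part of the test):

    addq  $4, %rbx             # fits in imm8:  48 83 C3 04          (4 bytes)
    addq  $0x1234, %rbx        # needs imm32:   48 81 C3 34 12 00 00 (7 bytes)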
|
/external/openssl/crypto/sha/asm/ |
D | sha512-x86_64.S | 7 pushq %rbx 27 movq 8(%rdi),%rbx 57 movq %rbx,%r11 66 movq %rbx,%r15 105 xorq %rbx,%r10 112 andq %rbx,%r15 160 addq %r12,%rbx 166 movq %rbx,%r13 174 xorq %rbx,%r13 182 andq %rbx,%r15 [all …]
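In the SHA-512 rounds, %rbx is one of the eight 64-bit state words, and the andq/xorq runs above compute the Ch and Maj functions. Ch(e,f,g) = (e AND f) XOR (NOT e AND g) is usually folded into three instructions via the identity ((f XOR g) AND e) XOR g; a sketch with assumed state registers:

    movq  %r9, %r15            # f
    xorq  %r10, %r15           # f ^ g
    andq  %r8, %r15            # (f ^ g) & e
    xorq  %r10, %r15           # ((f ^ g) & e) ^ g  ==  Ch(e, f, g)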
|
/external/libvpx/libvpx/vp8/common/x86/ |
D | variance_impl_mmx.asm | 20 GET_GOT rbx 82 push rbx 92 mov rbx, arg(2) ;[ref_ptr] 98 movq mm1, [rbx] ; Copy eight bytes to mm1 114 add rbx,rdx ; Inc pointer into ref data 116 movq mm1, [rbx] ; Copy eight bytes to mm1 138 add rbx,rdx ; Inc pointer into ref data 140 movq mm1, [rbx] ; Copy eight bytes to mm1 161 add rbx,rdx ; Inc pointer into ref data 163 movq mm1, [rbx] ; Copy eight bytes to mm1 [all …]
|
D | sad_sse2.asm | 98 push rbx 106 movsxd rbx, dword ptr arg(1) ;src_stride 109 lea rcx, [rsi+rbx*8] 111 lea rcx, [rcx+rbx*8] 123 movq mm2, QWORD PTR [rsi+rbx] 129 lea rsi, [rsi+rbx*2] 145 pop rbx 161 push rbx 169 movsxd rbx, dword ptr arg(1) ;src_stride 172 lea rcx, [rsi+rbx*8] [all …]
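In the SAD routines, rbx holds src_stride so the row pointer can advance by a plain add or lea each iteration while psadbw does the per-row work. A stripped-down 16-wide SAD loop in AT&T syntax (register roles and the row count in %rcx are assumptions):

    pxor   %xmm7, %xmm7        # SAD accumulator
sad_row:
    movdqu (%rsi), %xmm0       # 16 source bytes
    movdqu (%rdi), %xmm1       # 16 reference bytes
    psadbw %xmm1, %xmm0        # two partial sums of absolute differences
    paddd  %xmm0, %xmm7        # accumulate
    addq   %rbx, %rsi          # advance by src_stride
    addq   %rdx, %rdi          # advance by ref_stride
    decq   %rcx
    jnz    sad_row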
|
/external/elfutils/tests/ |
D | testfile45.expect.bz2 | ... : 44 00 03 add %r8b,(%rbx) 26 37: 44 00 08 add %r9b ...
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | subtract_sse2.asm | 22 GET_GOT rbx 81 GET_GOT rbx 91 push rbx 93 movsxd rbx, dword ptr arg(4);pred_stride 111 movdqa xmm5, [rax + rbx] 114 lea rax, [rax+rbx*2] 137 pop rbx 154 GET_GOT rbx 166 push rbx 167 movsxd rbx, dword ptr arg(6);pred_stride [all …]
|
/external/valgrind/main/coregrind/m_dispatch/ |
D | dispatch-amd64-darwin.S | 68 pushq %rbx 166 popq %rbx 214 movq %rax, %rbx /* next guest addr */ 215 andq $VG_TT_FAST_MASK, %rbx /* entry# */ 216 shlq $4, %rbx /* entry# * sizeof(FastCacheEntry) */ 217 movq 0(%rcx,%rbx,1), %r10 /* .guest */ 218 movq 8(%rcx,%rbx,1), %r11 /* .host */
|
D | dispatch-amd64-linux.S | 69 pushq %rbx 167 popq %rbx 214 movq %rax, %rbx /* next guest addr */ 215 andq $VG_TT_FAST_MASK, %rbx /* entry# */ 216 shlq $4, %rbx /* entry# * sizeof(FastCacheEntry) */ 217 movq 0(%rcx,%rbx,1), %r10 /* .guest */ 218 movq 8(%rcx,%rbx,1), %r11 /* .host */
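Both dispatchers use the same fast-path lookup: the next guest address is masked down to an entry number, scaled by sizeof(FastCacheEntry) (16 bytes: a guest field and a host field), and the cached host address is taken only if the guest field matches. A stripped-down sketch of that lookup (the miss label is a placeholder):

    movq  %rax, %rbx               # %rax = next guest address
    andq  $VG_TT_FAST_MASK, %rbx   # entry number
    shlq  $4, %rbx                 # entry number * sizeof(FastCacheEntry)
    movq  0(%rcx,%rbx,1), %r10     # entry.guest
    movq  8(%rcx,%rbx,1), %r11     # entry.host
    cmpq  %rax, %r10               # hit only if the cached guest address matches
    jne   fast_lookup_miss         # placeholder slow path
    jmpq  *%r11                    # run the cached host code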
|
/external/valgrind/main/VEX/orig_amd64/ |
D | test2.sorted | 132 44016308 addl %r12d, 8(%rbx) 162 4883C304 addq $4, %rbx 184 4801C3 addq %rax, %rbx 187 4801EB addq %rbp, %rbx 190 4801D8 addq %rbx, %rax 193 6601445316 addw %ax, 22(%rbx,%rdx,2) 373 480F44DF cmove %rdi, %rbx 464 480F45D8 cmovne %rax, %rbx 502 807B1100 cmpb $0, 17(%rbx) 504 807B1200 cmpb $0, 18(%rbx) [all …]
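Each entry in test2.sorted pairs the raw bytes with the expected disassembly, and the encodings decode mechanically. For example, the 4801C3 line breaks down as follows (a worked decoding, not additional test data):

    # 48 01 C3  ->  addq %rax, %rbx
    #   48 = REX.W                      (64-bit operand size)
    #   01 = ADD r/m64, r64
    #   C3 = ModRM: mod=11 (register), reg=000 (%rax), r/m=011 (%rbx)
    addq %rax, %rbx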
|
/external/openssl/crypto/rc4/asm/ |
D | rc4-x86_64.S | 11 pushq %rbx 27 xorq %rbx,%rbx 29 subq %r10,%rbx 36 andq $7,%rbx 39 subq %rbx,%r11 52 decq %rbx 142 andq $15,%rbx 144 subq %rbx,%r11 157 decq %rbx 160 movq %rcx,%rbx [all …]
|