/third_party/libffi/src/tile/ |
D | tile.S |
     83  #define FRAME_SIZE r10
    253  SW sp, r10
    254  .cfi_return_column r10
    255  .cfi_offset r10, 0
    258  addli r10, sp, -(CLOSURE_FRAME_SIZE - REG_SIZE)
    262  SW r10, sp
    271  addi r10, sp, LINKAGE_SIZE
    275  STORE_REG(r0, r10)
    276  STORE_REG(r1, r10)
    277  STORE_REG(r2, r10)
    [all …]
|
/third_party/skia/tests/sksl/runtime/ |
D | SwitchWithLoops.skvm |
    12  10 r10 = splat 1 (1.4012985e-45)
    13  11 r9 = eq_i32 r10 r9
    14  12 r11 = bit_and r10 r9
    16  14 r13 = add_i32 r11 r10
    19  17 r13 = add_i32 r11 r10
    23  21 r12 = add_i32 r11 r10
    27  25 r15 = add_i32 r11 r10
    31  29 r14 = add_i32 r11 r10
    34  32 r14 = add_i32 r11 r10
    38  36 r13 = add_i32 r11 r10
    [all …]
|
D | Switch.skvm |
    12  10 r10 = trunc r1
    13  11 r8 = eq_i32 r8 r10
    21  19 r10 = eq_i32 r16 r10
    22  20 r10 = bit_or r8 r10
    23  21 r10 = bit_and r10 r15
    24  22 r11 = select r10 r0 r11
    25  23 r12 = select r10 r1 r12
    26  24 r13 = select r10 r2 r13
    27  25 r14 = select r10 r3 r14
    28  26 r10 = bit_xor r9 r10
    [all …]
|
D | SwitchWithFallthrough.skvm |
    12  10 r10 = trunc r1
    14  12 r11 = eq_i32 r11 r10
    18  16 r14 = eq_i32 r14 r10
    22  20 r8 = eq_i32 r8 r10
    37  35 r10 = eq_i32 r9 r10
    38  36 r10 = bit_or r8 r10
    39  37 r10 = bit_and r12 r10
    40  38 r13 = bit_and r10 r13
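The three .skvm dumps above are SkVM's branchless lowering of switch: the scrutinee is compared against every case (eq_i32), the matches are OR-ed together so fallthrough keeps later bodies live, and select keeps or discards each body's results per lane. A one-lane C model of that shape (all names here are mine, not SkVM's):

    #include <stdint.h>

    typedef int32_t mask_t;                 /* all-ones or all-zeros, like eq_i32 */

    static int32_t select_i32(mask_t m, int32_t t, int32_t f) {
        return (t & m) | (f & ~m);          /* branch-free select */
    }

    int32_t switch_lowered(int32_t x, mask_t exec /* lane-active mask */) {
        int32_t out = 0;
        mask_t matched = 0;                 /* the bit_or chain: once set, stays set */
        matched |= -(mask_t)(x == 1);       /* eq_i32 against case 1 */
        out = select_i32(matched & exec, 10, out);       /* case 1 body, falls through */
        matched |= -(mask_t)(x == 2);       /* eq_i32 against case 2 */
        out = select_i32(matched & exec, out + 1, out);  /* case 2 body */
        return out;
    }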
|
/third_party/openssl/crypto/bn/asm/ |
D | x86_64-mont5.pl |
     81  $lo0="%r10";
    129  lea -280(%rsp,$num,8),%r10 # future alloca(8*(num+2)+256+8)
    131  and \$-1024,%r10 # minimize TLB usage
    141  sub %r10,%r11
    143  lea (%r10,%r11),%rsp
    145  cmp %r10,%rsp
    152  cmp %r10,%rsp
    156  lea .Linc(%rip),%r10
    167  movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
    168  movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
    [all …]
|
D | rsaz-x86_64.pl |
    149  movq %rdx, %r10
    150  adcq \$0, %r10
    153  addq %rax, %r10
    196  addq %rax, %r10
    243  adcq %r10, %r10
    252  adcq %rdx, %r10
    256  movq %r10, 24(%rsp)
    305  movq 24($inp), %r10
    316  mulq %r10
    323  mulq %r10
    [all …]
|
D | x86_64-mont.pl |
     92  $lo0="%r10";
    145  lea -16(%rsp,$num,8),%r10 # future alloca(8*(num+2))
    147  and \$-1024,%r10 # minimize TLB usage
    157  sub %r10,%r11
    159  lea (%r10,%r11),%rsp
    161  cmp %r10,%rsp
    169  cmp %r10,%rsp
    367  my @A=("%r10","%r11");
    400  lea -32(%rsp,$num,8),%r10 # future alloca(8*(num+4))
    402  and \$-1024,%r10 # minimize TLB usage
    [all …]
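The "future alloca" / "minimize TLB usage" comments in both mont files describe the same trick: size the Montgomery scratch area, then round its base down to a 1 KiB boundary so the hot working set spans as few pages (and TLB entries) as possible. A sketch of that address computation, under my reading of the code (helper name is mine):

    #include <stddef.h>
    #include <stdint.h>

    /* Compute a 1 KiB-aligned base for a scratch area of 8*(num+2) bytes
     * carved out below the current stack top. */
    static uint64_t *mont_scratch_base(uint8_t *stack_top, size_t num) {
        uintptr_t base = (uintptr_t)stack_top - 8 * (num + 2); /* future alloca */
        base &= ~(uintptr_t)1023;            /* and $-1024: minimize TLB usage */
        return (uint64_t *)base;
    }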
|
/third_party/ffmpeg/libavcodec/arm/ |
D | vp8dsp_armv6.S |
     61  push {r4-r10, lr}
     64  mov r10, #0
     68  stm r1!, {r10, lr}
     71  stm r1!, {r10, lr}
     74  stm r1!, {r10, lr}
     77  stm r1!, {r10, lr}
    125  sbfx r10, r4, #3, #13
    130  sxth r10, r4
    134  asr r10, #3 @ block[0][3]
    143  strh r10, [r0], #32
    [all …]
|
D | sbcdsp_armv6.S |
    41  ldrd r10, r11, [r2, #16]
    47  smlad r3, r8, r10, r3
    50  ldrd r10, r11, [r2, #48]
    55  smlad r3, r8, r10, r3
    58  ldrd r10, r11, [r2, #8]
    64  smlad r12, r8, r10, r14
    67  ldrd r10, r11, [r2, #40]
    72  smlad r12, r8, r10, r12
    75  ldrd r10, r11, [r2, #72]
    79  smlad r12, r8, r10, r12 @ t1[2] is done
    [all …]
|
D | mpegaudiodsp_fixed_armv6.S |
     75  sum8 r8, r9, r1, r0, r10, r11, r12, lr
     76  sum8 r8, r9, r1, r2, r10, r11, r12, lr, rsb, 32
     77  round r10, r8, r9
     78  strh_post r10, r3, r4
     84  ldr r10, [r1, #4]!
     88  ldr r10, [r1, #4*64*\i]
     91  smlal r8, r9, r10, r12
     92  ldr r10, [r0, #4*64*(\i+1)]
    101  smlal r8, r9, r12, r10
    107  smlal r4, r7, r11, r10
    [all …]
|
D | hpeldsp_arm.S |
    111  ALIGN_QWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
    122  ALIGN_QWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
    133  ALIGN_QWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
    198  push {r4-r10,lr}
    202  ldm r1, {r4-r5, r10}
    204  ALIGN_DWORD_D 1, r6, r7, r4, r5, r10
    211  pop {r4-r10,pc}
    214  ldm r1, {r4-r5, r10}
    216  ALIGN_DWORD_D 1, r6, r7, r4, r5, r10
    217  ALIGN_DWORD_D 2, r8, r9, r4, r5, r10
    [all …]
|
D | vp8_armv6.S |
     80  rac_get_prob r5, r6, r7, r8, r0, r9, r10
     85  rac_get_prob r5, r6, r7, r8, r0, r9, r10
     96  ldrhcs r10, [r7], #2
    100  rev16cs r10, r10
    103  T lslcs r10, r10, r6
    104  T orrcs r8, r8, r10
    105  A orrcs r8, r8, r10, lsl r6
    109  movrel r10, zigzag_scan-1
    113  ldrb r10, [r10, r3]
    117  strh r12, [r1, r10]
    [all …]
|
D | simple_idct_armv6.S |
     59  ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
     64  smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
     66  pkhtb r2, ip, r10,asr #16 /* r3 = W7 | (W3 << 16) */
     67  pkhbt r1, ip, r10,lsl #16 /* r1 = W1 | (W5 << 16) */
     69  smlad r8, lr, r10,r8 /* B0 += W5*row[5] + W7*row[7] */
     70  smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */
     73  smlad r10,lr, r2, r10 /* B2 += W7*row[5] + W3*row[7] */
     96  ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
    103  smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
    105  pkhtb r2, ip, r10,asr #16 /* r3 = W7 | (W3 << 16) */
    [all …]
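The W5|(W7<<16)-style comments above pack two 16-bit IDCT coefficients per register so one ARMv6 instruction performs two multiplies. C models of the two dual-16-bit ops involved (standard smlad/smusdx semantics; helper names are mine):

    #include <stdint.h>

    static int32_t lo16(uint32_t x) { return (int16_t)x; }
    static int32_t hi16(uint32_t x) { return (int16_t)(x >> 16); }

    /* smlad: acc + rn.lo*rm.lo + rn.hi*rm.hi */
    static int32_t smlad(uint32_t rn, uint32_t rm, int32_t acc) {
        return acc + lo16(rn) * lo16(rm) + hi16(rn) * hi16(rm);
    }

    /* smusdx: rn.lo*rm.hi - rn.hi*rm.lo (dual multiply-subtract, exchanged) */
    static int32_t smusdx(uint32_t rn, uint32_t rm) {
        return lo16(rn) * hi16(rm) - hi16(rn) * lo16(rm);
    }

    /* So with r3 = row[1] | (row[3] << 16) and r10 = W5 | (W7 << 16),
     * smusdx(r3, r10) = W7*row[1] - W5*row[3], the B3 term in the comments. */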
|
D | hpeldsp_armv6.S |
     50  ldr r10, [r1, #8]
     53  strd r10, r11, [r0, #8]
     93  lsr r10, r8, #8
     95  orr r10, r10, r9, lsl #24
    103  eor r14, r8, r10
    106  uhadd8 r8, r8, r10
    131  eor r10, r4, r6
    134  and r10, r10, r12
    136  uadd8 r8, r8, r10
    140  uhadd8 r10, r4, r6
    [all …]
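uhadd8 halves four byte sums at once for half-pel averaging, and the eor/and/uadd8 pattern around it restores round-to-nearest. The same thing can be written as a plain SWAR expression in C (a sketch, not ffmpeg's code):

    #include <stdint.h>

    /* Per-byte floor((a+b)/2) on four packed bytes: keep the common bits,
     * add half the differing bits; the 0xFE mask stops shifted bits from
     * leaking across byte lanes. */
    static uint32_t uhadd8_c(uint32_t a, uint32_t b) {
        return (a & b) + (((a ^ b) & 0xFEFEFEFEu) >> 1);
    }

    /* Rounded variant, matching the eor/and(0x01010101)/uadd8 pattern:
     * add back the per-byte low bit of a^b (never carries across lanes). */
    static uint32_t uhadd8_round_c(uint32_t a, uint32_t b) {
        return uhadd8_c(a, b) + ((a ^ b) & 0x01010101u);
    }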
|
/third_party/python/Modules/_ctypes/libffi_osx/x86/ |
D | darwin64.S |
    47  movq (%rsp), %r10 /* Load return address. */
    53  movq %r10, 24(%rax) /* Relocate return address. */
    57  movq %r12, %r10
    62  movq (%r10), %rdi
    63  movq 8(%r10), %rsi
    64  movq 16(%r10), %rdx
    65  movq 24(%r10), %rcx
    66  movq 32(%r10), %r8
    67  movq 40(%r10), %r9
    73  leaq 176(%r10), %rsp
    [all …]
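The movq run at offsets 0..40 reloads the six integer argument registers in x86-64 ABI order from a register area the ffi_call trampoline filled earlier (the unix64.S entry further down shows the same pattern). A guessed C view of that block; the struct and field names are mine, not libffi's:

    #include <stdint.h>

    struct int_reg_area {
        uint64_t gpr[6];   /* rdi, rsi, rdx, rcx, r8, r9 at offsets 0x00..0x28 */
        /* the real frame continues with SSE registers, stack args, etc. */
    };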
|
/third_party/gstreamer/gstplugins_good/gst/goom/ |
D | ppc_zoom_ultimate.s |
     40  ; r10 <=> int [16][16] precalccoeffs
     63  mr r11,r10
     82  lwz r10,4(r30) ; py2
     90  sub r10,r10,r29
     93  mullw r10,r10,r9
     97  srawi r10,r10,16
     99  add r29,r29,r10
    103  rlwinm r10,r2,6,28-6,31-6 ; r10 <- (r2 << 2) & 0x000002D0 (r10=(r2%16)*4*16)
    105  rlwimi r10, r29, 2, 28-2, 31-2 ; r10 <- ((r29 << 2) & 0x0000002D) | (r10 & !0x0000002D) (r10=…
    116  lwzx r10,r11,r10 ; Loads coefs
    [all …]
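Per the comments, the rlwinm/rlwimi pair assembles a byte offset into the int[16][16] coefficient table from the low four bits of each coordinate, with no multiply. My reading of it in C (the function name is mine, and which coordinate is the row is a guess):

    #include <stdint.h>

    static int32_t coef_lookup(const int32_t coeffs[16][16],
                               uint32_t cx, uint32_t cy) {
        /* (cy % 16) * 16 * 4 bytes of row, plus (cx % 16) * 4 bytes of column */
        uint32_t off = ((cy & 15) << 6) | ((cx & 15) << 2);
        return *(const int32_t *)((const uint8_t *)coeffs + off);
    }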
|
/third_party/musl/src/string/arm/ |
D | memcpy.S |
    112  ldrne r10,[r1], #4 /* 4 bytes */
    113  strne r10,[r0], #4
    289  ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
    305  orr r9, r9, r10, lsr #16
    306  mov r10, r10, lsl #16
    307  orr r10, r10, r11, lsr #16
    308  stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
    323  orr r9, r9, r10, lsl #16
    324  mov r10, r10, lsr #16
    325  orr r10, r10, r11, lsl #16
    [all …]
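The ldmia/orr/stmia runs handle a source that sits a half-word off the destination's word alignment: whole aligned words are loaded, then each output word is stitched from two neighbours with the lsr/lsl #16 pairs (the liteos_m and uniproton entries below are ports of this same file). A simplified one-word-at-a-time C sketch, assuming little-endian and a 2-byte misalignment; the real code unrolls with load/store-multiple:

    #include <stddef.h>
    #include <stdint.h>

    static void copy_misaligned2(uint32_t *dst, const uint8_t *src, size_t words) {
        const uint32_t *s = (const uint32_t *)(src - 2); /* back up to alignment */
        uint32_t cur = *s++ >> 16;                       /* first 2 valid bytes */
        while (words--) {
            uint32_t next = *s++;
            *dst++ = cur | (next << 16); /* low half from prev word, high from next */
            cur = next >> 16;
        }
    }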
|
/third_party/openssl/crypto/aes/asm/ |
D | vpaes-x86_64.pl |
     89  ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
    112  lea .Lk_mc_backward(%rip),%r10
    125  movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    127  movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
    168  movdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    169  movdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    173  movdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
    202  lea .Lk_dsbd(%rip),%r10
    209  add %r10, %r11
    217  movdqa -0x20(%r10),%xmm4 # 4 : sb9u
    [all …]
|
D | vpaes-armv8.pl |
    160  // Fills register %r10 -> .aes_consts (so you can -fPIC)
    185  // Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
    211  ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    217  ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
    247  // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    248  // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    250  ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
    305  ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    316  ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
    365  // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    [all …]
|
/third_party/openssl/crypto/camellia/asm/ |
D | cmll-x86_64.pl |
    527  shl \$32,%r10 # @S[2]||
    529  or %r11,%r10 # ||@S[3]
    532  &_saveround (2,$out,-128,"%r8","%r10"); # KA<<<0
    535  &_rotl128 ("%r8","%r10",15);
    536  &_saveround (6,$out,-128,"%r8","%r10"); # KA<<<15
    537  &_rotl128 ("%r8","%r10",15); # 15+15=30
    538  &_saveround (8,$out,-128,"%r8","%r10"); # KA<<<30
    541  &_rotl128 ("%r8","%r10",15); # 30+15=45
    545  &_rotl128 ("%r8","%r10",15); # 45+15=60
    546  &_saveround (14,$out,-128,"%r8","%r10"); # KA<<<60
    [all …]
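The KA<<<15, <<<30, <<<45 comments show _rotl128 walking the Camellia subkey KA through its rotation schedule 15 bits at a time, with the 128-bit value split across %r8:%r10. What such a two-register rotate boils down to in C (a sketch; the Perl helper's exact register shuffling may differ):

    #include <stdint.h>

    /* Rotate the 128-bit value hi:lo left by n bits, 0 < n < 64. */
    static void rotl128(uint64_t *hi, uint64_t *lo, unsigned n) {
        uint64_t h = *hi, l = *lo;
        *hi = (h << n) | (l >> (64 - n));
        *lo = (l << n) | (h >> (64 - n));
    }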
|
/third_party/libffi/src/x86/ |
D | unix64.S |
    61  movq (%rsp), %r10 /* Load return address. */
    66  movq %r10, 24(%rax) /* Relocate return address. */
    80  movq %rdi, %r10 /* Save a copy of the register area. */
    85  movq (%r10), %rdi
    86  movq 0x08(%r10), %rsi
    87  movq 0x10(%r10), %rdx
    88  movq 0x18(%r10), %rcx
    89  movq 0x20(%r10), %r8
    90  movq 0x28(%r10), %r9
    91  movl 0xb0(%r10), %eax
    [all …]
|
/third_party/openssl/crypto/ |
D | x86_64cpuid.pl |
    123  movzb %cl,%r10 # number of cores - 1
    124  inc %r10 # number of cores
    285  xor %r10,%r10
    290  mov ($arg1),%r10
    293  xor ($arg2),%r10
    295  or %r11,%r10
    345  xorq %r10,%r10
    367  xorq %r10,%r10
    374  my $out="%r10";
    485  ${rdop} %r10
    [all …]
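The xor/or run at lines 285-295 has the shape of OpenSSL's constant-time comparison (CRYPTO_memcmp): corresponding words are XOR-ed and every difference is OR-ed into one accumulator, so the running time never reveals where the first mismatch sits. A byte-wise C equivalent of the idea:

    #include <stddef.h>

    static int ct_memcmp(const void *a, const void *b, size_t len) {
        const unsigned char *pa = a, *pb = b;
        unsigned char acc = 0;
        for (size_t i = 0; i < len; i++)
            acc |= pa[i] ^ pb[i];   /* no early exit on mismatch */
        return acc != 0;            /* 0 iff the buffers are equal */
    }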
|
/third_party/musl/porting/liteos_m/kernel/src/string/arch/arm/ |
D | memcpy_le.S |
    127  ldrne r10,[r1], #4 /* 4 bytes */
    128  strne r10,[r0], #4
    290  ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
    306  orr r9, r9, r10, lsl #16
    307  mov r10, r10, lsr #16
    308  orr r10, r10, r11, lsl #16
    309  stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
    317  ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
    333  orr r9, r9, r10, lsl #24
    334  mov r10, r10, lsr #8
    [all …]
|
/third_party/musl/porting/uniproton/kernel/src/string/arch/arm/ |
D | memcpy_le.S |
    123  ldrne r10,[r1], #4 /* 4 bytes */
    124  strne r10,[r0], #4
    286  ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
    302  orr r9, r9, r10, lsl #16
    303  mov r10, r10, lsr #16
    304  orr r10, r10, r11, lsl #16
    305  stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
    313  ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
    329  orr r9, r9, r10, lsl #24
    330  mov r10, r10, lsr #8
    [all …]
|
/third_party/openssl/crypto/sha/asm/ |
D | keccak1600-avx512vl.pl |
     64  lea iotas(%rip),%r10
    173  vpternlogq \$0x96,(%r10),@T[0],$A00
    174  lea 32(%r10),%r10
    197  lea 96(%rsp),%r10
    218  vmovdqa @T[0],32*2-96(%r10) # zero transfer area on stack
    219  vmovdqa @T[0],32*3-96(%r10)
    220  vmovdqa @T[0],32*4-96(%r10)
    221  vmovdqa @T[0],32*5-96(%r10)
    222  vmovdqa @T[0],32*6-96(%r10)
    239  mov %r8,$A_jagged[$i]-96(%r10)
    [all …]
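vpternlogq $0x96 folds the round's iota constant into the Keccak state in one instruction: the immediate 0x96 is the 8-entry truth table of a ^ b ^ c. A scalar C model of how the immediate selects each result bit (0x96 happens to be symmetric, so the operand-to-bit mapping does not matter here):

    #include <stdint.h>

    static uint64_t ternlog_96(uint64_t a, uint64_t b, uint64_t c) {
        uint64_t r = 0;
        for (int bit = 0; bit < 64; bit++) {
            unsigned idx = (((a >> bit) & 1) << 2)   /* dest operand -> bit 2 */
                         | (((b >> bit) & 1) << 1)   /* src1 -> bit 1 */
                         | ((c >> bit) & 1);         /* src2 -> bit 0 */
            r |= ((0x96u >> idx) & 1ull) << bit;     /* look up truth table */
        }
        return r;   /* equals a ^ b ^ c for imm8 = 0x96 */
    }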
|