
Searched refs:r10 (Results 1 – 25 of 211) sorted by relevance


/external/openssl/crypto/sha/asm/
sha1-armv4-large.s
22 ldrb r10,[r1,#2]
27 orr r9,r9,r10,lsl#8
28 eor r10,r5,r6 @ F_xx_xx
35 eor r10,r5,r6 @ F_xx_xx
41 and r10,r4,r10,ror#2
43 eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
45 add r7,r7,r10 @ E+=F_00_19(B,C,D)
47 ldrb r10,[r1,#2]
52 orr r9,r9,r10,lsl#8
53 eor r10,r4,r5 @ F_xx_xx
[all …]
/external/libvpx/vp8/common/arm/armv6/
loopfilter_v6.asm
76 ldr r10, [src], pstep ; p2
89 uqsub8 r6, r9, r10 ; p3 - p2
90 uqsub8 r7, r10, r9 ; p2 - p3
91 uqsub8 r8, r10, r11 ; p2 - p1
92 uqsub8 r10, r11, r10 ; p1 - p2
95 orr r8, r8, r10 ; abs (p2-p1)
102 ldr r10, [src], pstep ; q1
108 uqsub8 r6, r11, r10 ; p1 - q1
109 uqsub8 r7, r10, r11 ; q1 - p1
118 uqsub8 r7, r9, r10 ; q0 - q1
[all …]
filter_v6.asm
59 ldrb r10, [r0], #2
66 pkhbt r8, r9, r10, lsl #16 ; r10 | r9
71 pkhbt r10, r10, r11, lsl #16 ; r11 | r10
75 smlad lr, r10, r5, lr
76 ldrb r10, [r0, #1]
82 pkhbt r9, r9, r10, lsl #16 ; r10 | r9
83 pkhbt r10, r10, r11, lsl #16 ; r11 | r10
86 smlad r11, r10, r6, r8
88 ands r10, r7, #0xff ; test loop counter
99 ldrneb r10, [r0], #2
[all …]
idct_v6.asm
12 ; r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r14
71 …smulwb r10, r5, r12 ; ([4] * sinpi8sqrt2) >> 16 1, r10 un 2, r12/r5 ^1 …
76 rsb r11, r11, r10 ; c1 = temp1 - temp2 1 c1
78 …smulwb r10, r4, r12 ; ([4] * cospi8sqrt2minus1) >> 16 1, r10 un 2, r12/r4 ^1 …
82 add r10, r12, r10 ; temp1 1
83 add r3, r10, r3 ; d1 = temp1 + temp2 1 d1
84 add r10, r9, r3 ; a1 + d1 1 a+d
91 strh r10, [r1], #0x2 ; out[0] = a1+d1 1 ++
104 …smulwb r10, r4, r8 ; ([3] * cospi8sqrt2minus1) >> 16 1, r10 un 2, r4/r8 ^1 …
107 add r10, r8, r10 ; temp2 1
[all …]
iwalsh_v6.asm
34 qadd16 r10, r2, r8 ; a1 [1+13 | 0+12]
39 qadd16 r2, r10, r11 ; a1 + b1 [1 | 0]
41 qsub16 r6, r10, r11 ; a1 - b1 [9 | 8]
44 qadd16 r10, r3, r9 ; a1 [3+15 | 2+14]
49 qadd16 r3, r10, r11 ; a1 + b1 [3 | 2]
51 qsub16 r7, r10, r11 ; a1 - b1 [11 | 10]
56 qsubaddx r10, r2, r3 ; [c1|a1] [1-2 | 0+3]
61 qaddsubx r2, r10, r11 ; [b2|c2] [c1+d1 | a1-b1]
62 qaddsubx r3, r11, r10 ; [a2|d2] [b1+a1 | d1-c1]
63 ldr r10, c0x00030003
[all …]
sixtappredict8x4_v6.asm
54 ldrb r10, [r0, #-1]
62 pkhbt r9, r9, r10, lsl #16 ; r10 | r9
74 pkhbt r10, r10, r6, lsl #16 ; r10 | r9
75 pkhbt r6, r6, r7, lsl #16 ; r11 | r10
76 smlad r11, r10, r5, r11 ; vp8_filter[4], vp8_filter[5]
95 movne r8, r10
97 movne r10, r12
144 smulbt r10, r3, r6
149 smladx r10, r12, r7, r10
154 smladx r10, r11, r8, r10
[all …]
/external/llvm/test/MC/X86/
x86_64-bmi-encoding.s
9 blsmskq %r11, %r10
17 blsmskq (%rax), %r10
25 blsiq %r11, %r10
33 blsiq (%rax), %r10
41 blsrq %r11, %r10
49 blsrq (%rax), %r10
57 andnq (%rax), %r11, %r10
69 bextrq %r12, (%rax), %r10
73 bextrq %r12, %r11, %r10
85 bzhiq %r12, (%rax), %r10
[all …]
/external/tremolo/Tremolo/
bitwiseARM.s
45 STMFD r13!,{r10,r11,r14}
53 LDR r10,[r3] @ r10= ptr[0]
57 MOV r10,r10,LSR r14 @ r10= ptr[0]>>(32-bitsLeftInWord)
60 ORRLT r10,r10,r11,LSL r14 @ r10= Next 32 bits.
63 AND r0,r10,r14
64 LDMFD r13!,{r10,r11,PC}
68 ADDS r10,r2,r1 @ r10= bitsLeftInSegment + bits (i.e.
70 @ r10 = bitsLeftInSegment (initial)
73 MOV r5,r10 @ r5 = bitsLeftInSegment (initial)
75 BEQ look_next_segment @ r10= r12 = 0, if we branch
[all …]
mdctARM.s
187 LDMFD r12,{r8,r9,r10} @ r8 = step
189 @ r10= wR
198 LDR r6, [r10,#-4]! @ r6 = *--wR
226 LDMFD r12,{r8,r9,r10} @ r8 = step
228 @ r10= wR
237 LDR r6, [r10,#-4]! @ r6 = *--wR
319 LDR r10,[r5],r2,LSL #2 @ r10= T[0] T += step
325 SMLAL r8, r9, r6, r10 @ (r8, r9) += s0*T[0]
328 SMULL r8, r12,r7, r10 @ (r8, r12) = s2*T[0]
341 LDR r10,[r5,#4] @ r10= T[1]
[all …]
mdctLARM.s
187 LDMFD r12,{r8,r9,r10} @ r8 = step
189 @ r10= wR
197 LDRB r6, [r10,#-1]! @ r6 = *--wR
227 LDMFD r12,{r8,r9,r10} @ r8 = step
229 @ r10= wR
238 LDRB r6, [r10,#-1]! @ r6 = *--wR
322 LDRB r10,[r5],r2 @ r10= T[0] T += step
327 MUL r9, r6, r10 @ r9 = s0*T[0]
331 MUL r12,r7, r10 @ r12 = s2*T[0]
340 LDRB r10,[r5,#1] @ r10= T[1]
[all …]
/external/libffi/src/x86/
unix64.S
48 movq (%rsp), %r10 /* Load return address. */
53 movq %r10, 24(%rax) /* Relocate return address. */
56 movq %rdi, %r10 /* Save a copy of the register area. */
61 movq (%r10), %rdi
62 movq 8(%r10), %rsi
63 movq 16(%r10), %rdx
64 movq 24(%r10), %rcx
65 movq 32(%r10), %r8
66 movq 40(%r10), %r9
72 leaq 176(%r10), %rsp
[all …]
darwin64.S
48 movq (%rsp), %r10 /* Load return address. */
53 movq %r10, 24(%rax) /* Relocate return address. */
56 movq %rdi, %r10 /* Save a copy of the register area. */
61 movq (%r10), %rdi
62 movq 8(%r10), %rsi
63 movq 16(%r10), %rdx
64 movq 24(%r10), %rcx
65 movq 32(%r10), %r8
66 movq 40(%r10), %r9
72 leaq 176(%r10), %rsp
[all …]
/external/openssl/crypto/aes/asm/
vpaes-x86_64.pl
77 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
99 lea .Lk_mc_backward(%rip),%r10
113 movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
117 movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
155 movdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
156 movdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
160 movdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
187 lea .Lk_dsbd(%rip),%r10
194 add %r10, %r11
202 movdqa -0x20(%r10),%xmm4 # 4 : sb9u
[all …]
aes-armv4.s
121 sub r10,r3,#AES_encrypt-AES_Te @ Te
235 ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0]
237 ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
239 ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16]
241 ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24]
244 ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16]
245 ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0]
246 ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8]
248 ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24]
254 ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8]
[all …]
/external/libvpx/vp8/decoder/arm/armv6/
dequant_idct_v6.asm
70 smulwt r10, r4, r6
74 pkhbt r8, r8, r10, lsl #16
78 smulwb r10, r4, r12
82 pkhbt r10, r10, r7, lsl #16
85 uadd16 r6, r6, r10
86 uadd16 r10, r11, r14
88 uadd16 r9, r10, r6
89 usub16 r10, r10, r6
95 str r10, [r1, #24]
109 smulwt r10, r4, r8
[all …]
dequant_dc_idct_v6.asm
92 smulwt r10, r4, r6
96 pkhbt r8, r8, r10, lsl #16
100 smulwb r10, r4, r12
104 pkhbt r10, r10, r7, lsl #16
107 uadd16 r6, r6, r10
108 uadd16 r10, r11, r14
110 uadd16 r9, r10, r6
111 usub16 r10, r10, r6
117 str r10, [r1, #24]
131 smulwt r10, r4, r8
[all …]
/external/libvpx/vp8/encoder/arm/armv6/
vp8_subtract_armv6.asm
94 uxtb16 r10, r6, ror #8 ; [s3 | s1] (A)
98 usub16 r7, r10, r11 ; [d3 | d1] (A)
100 ldr r10, [r1, #4] ; src (B)
107 uxtb16 r8, r10 ; [s2 | s0] (B)
111 uxtb16 r10, r10, ror #8 ; [s3 | s1] (B)
115 usub16 r7, r10, r11 ; [d3 | d1] (B)
137 uxtb16 r10, r6, ror #8 ; [s3 | s1] (A)
141 usub16 r7, r10, r11 ; [d3 | d1] (A)
143 ldr r10, [r2, #4] ; src (B)
150 uxtb16 r8, r10 ; [s2 | s0] (B)
[all …]
walsh_v6.asm
34 qsubaddx r10, r2, r3 ; [c1|a1] [1-2 | 0+3]
39 qaddsubx r2, r10, r11 ; [1 | 2] [c1+d1 | a1-b1]
40 qaddsubx r3, r11, r10 ; [0 | 3] [b1+a1 | d1-c1]
44 qsubaddx r10, r6, r7 ; [c1|a1] [9-10 | 8+11]
49 qaddsubx r6, r10, r11 ; [9 |10] [c1+d1 | a1-b1]
50 qaddsubx r7, r11, r10 ; [8 |11] [b1+a1 | d1-c1]
56 qadd16 r10, r3, r9 ; a1 [0+12 | 3+15]
61 qadd16 r3, r10, r11 ; a2 [a1+b1] [0 | 3]
63 qsub16 r7, r10, r11 ; c2 [a1-b1] [8 |11]
66 qadd16 r10, r2, r8 ; a1 [1+13 | 2+14]
[all …]
vp8_variance8x8_armv6.asm
25 push {r4-r10, lr}
38 sel r10, r8, lr ; select bytes with positive difference
43 usad8 r6, r10, lr ; calculate sum of positive differences
45 orr r8, r8, r10 ; differences of all 4 pixels
52 uxtb16 r10, r8, ror #8 ; another two pixels to halfwords
58 smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2)
62 sel r10, r8, lr ; select bytes with positive difference
68 usad8 r6, r10, lr ; calculate sum of positive differences
70 orr r8, r8, r10 ; differences of all 4 pixels
78 uxtb16 r10, r8, ror #8 ; another two pixels to halfwords
[all …]
/external/valgrind/main/coregrind/m_mach/
mach_traps-amd64-darwin.S
40 movq %rcx, %r10
61 movq %rcx, %r10
71 movq %rcx, %r10
81 movq %rcx, %r10
91 movq %rcx, %r10
101 movq %rcx, %r10
111 movq %rcx, %r10
121 movq %rcx, %r10
131 movq %rcx, %r10
/external/libvpx/vp8/common/ppc/
recon_altivec.asm
65 stvx v2, 0, r10 ;# 2 rows to dst from buf
66 lwz r0, 0(r10)
72 lwz r0, 4(r10)
74 lwz r0, 8(r10)
76 lwz r0, 12(r10)
95 la r10, -48(r1) ;# buf
110 stw r0, 0(r10)
112 stw r0, 4(r10)
114 stw r0, 8(r10)
116 stw r0, 12(r10)
[all …]
/external/libvpx/vp8/encoder/arm/armv5te/
vp8_packtokens_partitions_armv5.asm
66 ldr r10, [sp, #40] ; ptr
81 str r10, [r0, #vp8_writer_buffer]
112 ldr r10, [sp, #88] ; vp8_coef_tree
133 ldrsb lr, [r10, lr] ; i = vp8_coef_tree[i+bb]
158 mov r10, #0
159 strb r10, [r7, r4] ; w->buffer[x] =(unsigned char)0
169 ldrb r10, [r7, r4] ; w->buffer[x]
170 add r10, r10, #1
171 strb r10, [r7, r4] ; w->buffer[x] + 1
174 ldr r10, [r0, #vp8_writer_buffer]
[all …]
vp8_packtokens_armv5.asm
60 ldr r10, [sp, #52] ; vp8_coef_tree
81 ldrsb lr, [r10, lr] ; i = vp8_coef_tree[i+bb]
106 mov r10, #0
107 strb r10, [r7, r4] ; w->buffer[x] =(unsigned char)0
117 ldrb r10, [r7, r4] ; w->buffer[x]
118 add r10, r10, #1
119 strb r10, [r7, r4] ; w->buffer[x] + 1
122 ldr r10, [r0, #vp8_writer_buffer]
131 strb r7, [r10, r4] ; w->buffer[w->pos++]
133 ; r10 is used earlier in the loop, but r10 is used as
[all …]
vp8_packtokens_mbrow_armv5.asm
82 ldr r10, [sp, #60] ; vp8_coef_tree
103 ldrsb lr, [r10, lr] ; i = vp8_coef_tree[i+bb]
128 mov r10, #0
129 strb r10, [r7, r4] ; w->buffer[x] =(unsigned char)0
139 ldrb r10, [r7, r4] ; w->buffer[x]
140 add r10, r10, #1
141 strb r10, [r7, r4] ; w->buffer[x] + 1
144 ldr r10, [r0, #vp8_writer_buffer]
153 strb r7, [r10, r4] ; w->buffer[w->pos++]
155 ; r10 is used earlier in the loop, but r10 is used as
[all …]
/external/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_copysrcframe_func_neon.asm
38 add r10, r2, r6 ;second row src
53 vld1.8 {q4, q5}, [r10]!
55 vld1.8 {q6, q7}, [r10]!
57 vld1.8 {q12, q13}, [r10]!
59 vld1.8 {q14, q15}, [r10]!
77 vld1.8 {d1}, [r10]!
91 ldrb r8, [r10], #1
99 add r10, r10, r6
149 add r10, r2, r6 ;second row src
167 vld1.8 {q4, q5}, [r10]!
[all …]
