
Searched refs:r9 (Results 1 – 25 of 280) sorted by relevance


/external/openssl/crypto/sha/asm/
sha1-armv4-large.S
23 ldrb r9,[r1,#3]
27 orr r9,r9,r10,lsl#8
29 orr r9,r9,r11,lsl#16
31 orr r9,r9,r12,lsl#24
33 ldr r9,[r1],#4 @ handles unaligned
38 rev r9,r9 @ byte swap
42 add r7,r7,r9 @ E+=X[i]
44 str r9,[r14,#-4]!
48 ldrb r9,[r1,#3]
52 orr r9,r9,r10,lsl#8
[all …]
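
The byte-wise ldrb/orr sequence assembles a big-endian 32-bit word from a possibly unaligned pointer; the ldr + rev pair is the fast path for cores that tolerate unaligned loads. A minimal C sketch of what both variants compute:

#include <stdint.h>

/* Big-endian load from an unaligned pointer: what ldrb/orr builds
   byte by byte, and what ldr + rev produces on little-endian ARM. */
static uint32_t load_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}
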
sha512-x86_64.S
31 movq 40(%rdi),%r9
44 movq %r9,%r15
91 xorq %r9,%r15
103 xorq %r9,%r15
136 addq %r9,%r12
141 movq %r11,%r9
147 xorq %rax,%r9
153 andq %r10,%r9
158 addq %r15,%r9
161 addq %r12,%r9
[all …]
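
The xorq/andq chains on %r9/%r15 are the branch-free forms of SHA-512's choice and majority functions; this is an assumed reading of the scheduled round code, not a line-for-line match:

#include <stdint.h>

/* Ch selects f or g depending on e; Maj is the bitwise majority. */
static uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)
{
    return g ^ (e & (f ^ g));        /* == (e & f) | (~e & g)     */
}

static uint64_t Maj(uint64_t a, uint64_t b, uint64_t c)
{
    return (a & b) | (c & (a | b));  /* == (a&b) ^ (a&c) ^ (b&c)  */
}
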
/external/libvpx/libvpx/vp8/encoder/arm/armv6/
walsh_v6.asm
35 ldrd r8, r9, [r0], r2
42 qadd16 r7, r8, r9 ; [d1|a1] [9+11 | 8+10]
43 qsub16 r8, r8, r9 ; [c1|b1] [9-11 | 8-10]
46 qadd16 r9, r10, r11 ; [d1|a1] [13+15 | 12+14]
65 lsls r2, r9, #16
66 smuad r2, r9, lr ; D0 = a1<<2 + d1<<2
106 smusd r9, r9, lr ; D3 = a1<<2 - d1<<2
107 add r7, r5, r9 ; d1_3 = B3 + D3
108 sub r5, r5, r9 ; c1_3 = B3 - D3
113 adds r9, r3, r5 ; b2 = b1_3 + c1_3
[all …]
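
The qadd16/qsub16 pairs compute the Walsh-Hadamard butterfly on two packed halfwords at a time, and smuad/smusd against a [4|4] constant fold in the scaling the "a1<<2 + d1<<2" comments describe. A scalar sketch of one 4-point pass, pairing inferred from the comments:

#include <stdint.h>

static void wht4(const int16_t *ip, int16_t *op)
{
    int a1 = ip[0] + ip[2];             /* qadd16: [9+11 | 8+10]    */
    int d1 = ip[1] + ip[3];
    int b1 = ip[0] - ip[2];             /* qsub16: [9-11 | 8-10]    */
    int c1 = ip[1] - ip[3];

    op[0] = (int16_t)((a1 + d1) << 2);  /* smuad: a1<<2 + d1<<2     */
    op[1] = (int16_t)((b1 + c1) << 2);
    op[2] = (int16_t)((b1 - c1) << 2);
    op[3] = (int16_t)((a1 - d1) << 2);  /* smusd: a1<<2 - d1<<2     */
}
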
vp8_subtract_armv6.asm
29 stmfd sp!, {r4-r9}
40 mov r9, #4 ; loop count
55 subs r9, r9, #1 ; decrement loop counter
66 ldmfd sp!, {r4-r9}
94 uxtb16 r9, r7 ; [p2 | p0] (A)
98 usub16 r6, r8, r9 ; [d2 | d0] (A)
105 pkhtb r9, r7, r6, asr #16 ; [d3 | d2] (A)
109 str r9, [r0], #4 ; diff (A)
111 uxtb16 r9, r11 ; [p2 | p0] (B)
115 usub16 r6, r8, r9 ; [d2 | d0] (B)
[all …]
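
uxtb16 unpacks alternating bytes into halfword lanes, usub16 subtracts two of them per instruction, and pkhbt/pkhtb repack the 16-bit differences. The scalar meaning is simply:

#include <stdint.h>

/* Residual = source minus prediction; differences can be negative,
   hence the widening to 16 bits that uxtb16/usub16 does in lanes. */
static void subtract_block(int16_t *diff, const uint8_t *src,
                           const uint8_t *pred, int n)
{
    for (int i = 0; i < n; i++)
        diff[i] = (int16_t)(src[i] - pred[i]);
}
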
vp8_short_fdct4x4_armv6.asm
49 ldrd r8, r9, [r0] ; [i5 | i4] [i7 | i6]
57 ror r9, r9, #16 ; [i6 | i7]
59 qadd16 r6, r8, r9 ; [i5+i6 | i4+i7] = [b1 | a1] without shift
60 qsub16 r7, r8, r9 ; [i5-i6 | i4-i7] = [c1 | d1] without shift
68 smuad r9, r6, lr ; o4 = (i5+i6)*8 + (i4+i7)*8
76 pkhbt r9, r9, r6, lsl #4 ; [o5 | o4], keep in register for PART 2
134 qadd16 r5, r9, r2 ; b1 = [i5+i9 | i4+i8]
135 qsub16 r6, r9, r2 ; c1 = [i5-i9 | i4-i8]
149 lsl r9, r3, #16 ; prepare bottom halfword for scaling
152 pkhtb r5, r3, r9, asr #20 ; pack and scale bottom halfword
[all …]
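
The ror/qadd16/qsub16 prologue forms the even butterflies without shifts, then smuad against an [8|8] constant yields "o4 = (i5+i6)*8 + (i4+i7)*8". A sketch of one row in the shape of libvpx's C reference; the 2217/5352 rotation constants come from that reference, not from this listing:

#include <stdint.h>

static void fdct4_row(const int16_t *ip, int16_t *op)
{
    int a1 = (ip[0] + ip[3]) * 8;   /* qadd16 butterfly, *8 scale   */
    int b1 = (ip[1] + ip[2]) * 8;
    int c1 = (ip[1] - ip[2]) * 8;   /* qsub16 butterfly             */
    int d1 = (ip[0] - ip[3]) * 8;

    op[0] = (int16_t)(a1 + b1);     /* the smuad-by-[8|8] outputs   */
    op[2] = (int16_t)(a1 - b1);
    op[1] = (int16_t)((c1 * 2217 + d1 * 5352 + 14500) >> 12);
    op[3] = (int16_t)((d1 * 2217 - c1 * 5352 +  7500) >> 12);
}
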
vp8_fast_quantize_b_armv6.asm
43 ldr r9, [r3], #4 ; [z1 | z0]
47 ssat16 lr, #1, r9 ; [sz1 | sz0]
48 eor r9, r9, lr ; [z1 ^ sz1 | z0 ^ sz0]
49 ssub16 r9, r9, lr ; x = (z ^ sz) - sz
50 sadd16 r9, r9, r10 ; [x1+r1 | x0+r0]
54 smulbb r0, r9, r11 ; [(x0+r0)*q0]
55 smultt r9, r9, r11 ; [(x1+r1)*q1]
61 pkhtb r0, r9, r0, asr #16 ; [y1 | y0]
62 ldr r9, [r4], #4 ; [q3 | q2]
69 smulbb r10, r12, r9 ; [(x2+r2)*q2]
[all …]
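
ssat16 #1 turns each halfword into its sign mask (0 or -1), and (z ^ sz) - sz is the classic branch-free absolute value; smulbb/smultt then scale by the quantizer and keep the high half. One coefficient, as a sketch:

#include <stdint.h>

static int16_t quantize_coeff(int16_t z, int16_t round, int16_t quant)
{
    int16_t sz = z >> 15;                     /* ssat16 #1: 0 or -1 */
    int16_t x  = (int16_t)((z ^ sz) - sz);    /* x = |z|            */
    int16_t y  = (int16_t)(((x + round) * quant) >> 16);  /* smulbb */
    return (int16_t)((y ^ sz) - sz);          /* sign restored      */
}
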
/external/openssl/crypto/modes/asm/
ghash-x86_64.S
21 movq (%rsi,%rax,1),%r9
30 movq %r9,%r10
32 shrq $4,%r9
35 xorq (%rsi,%rbx,1),%r9
37 xorq (%r11,%rdx,8),%r9
46 movq %r9,%r10
47 shrq $4,%r9
50 xorq (%rsi,%rax,1),%r9
52 xorq (%r11,%rdx,8),%r9
61 movq %r9,%r10
[all …]
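
The shrq $4 / xor-from-table pattern consumes the input a nibble at a time against precomputed multiples of H. The reference bit-serial GF(2^128) multiply it accelerates (per NIST SP 800-38D) looks roughly like this; the table-driven version replaces this loop with 4-bit lookups:

#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128;

/* Z = X * H in GF(2^128) with GHASH's bit order and polynomial. */
static u128 gf128_mul(u128 X, u128 H)
{
    u128 Z = { 0, 0 }, V = H;
    for (int i = 0; i < 128; i++) {
        uint64_t xi = (i < 64) ? (X.hi >> (63 - i)) & 1
                               : (X.lo >> (127 - i)) & 1;
        if (xi) { Z.hi ^= V.hi; Z.lo ^= V.lo; }
        uint64_t lsb = V.lo & 1;          /* V >>= 1, then reduce  */
        V.lo = (V.lo >> 1) | (V.hi << 63);
        V.hi >>= 1;
        if (lsb) V.hi ^= 0xE100000000000000ULL;
    }
    return Z;
}
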
/external/tremolo/Tremolo/
mdctARM.s
187 LDMFD r12,{r8,r9,r10} @ r8 = step
188 @ r9 = wL
196 LDR r11,[r9],#4 @ r11= *wL++
226 LDMFD r12,{r8,r9,r10} @ r8 = step
227 @ r9 = wL
235 LDR r11,[r9],#4 @ r11= *wL++
324 SMULL r8, r9, r7, r11 @ (r8, r9) = s2*T[1]
327 SMLAL r8, r9, r6, r10 @ (r8, r9) += s0*T[0]
331 MOV r9, r9, LSL #1
334 STR r9, [r4],#-16 @ aX[0] = r9
[all …]
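
SMULL/SMLAL build a full 64-bit accumulator for s2*T[1] + s0*T[0]; keeping only the high word and shifting it left by one is a Q31 fixed-point multiply-accumulate. A sketch:

#include <stdint.h>

static int32_t mdct_mac(int32_t s0, int32_t s2, int32_t T0, int32_t T1)
{
    int64_t acc = (int64_t)s2 * T1 + (int64_t)s0 * T0;  /* SMULL+SMLAL */
    /* MOV r9, r9, LSL #1 shifts only the high word, so the low
       register's top bit is dropped rather than carried up: */
    return (int32_t)((acc >> 32) << 1);
}
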
mdctLARM.s
187 LDMFD r12,{r8,r9,r10} @ r8 = step
188 @ r9 = wL
198 LDRB r11,[r9],#1 @ r11= *wL++
227 LDMFD r12,{r8,r9,r10} @ r8 = step
228 @ r9 = wL
237 LDRB r11,[r9],#1 @ r11= *wL++
327 MUL r9, r6, r10 @ r9 = s0*T[0]
329 MLA r9, r7, r11,r9 @ r9 += s2*T[1]
332 STR r9, [r4,#16] @ aX[0] = r9
347 MUL r9, r6, r10 @ r9 = s0*T[1]
[all …]
/external/openssl/crypto/aes/asm/
aes-armv4.S
232 and r9,lr,r0,lsr#16
239 ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16]
240 and r9,lr,r1,lsr#8
246 ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8]
252 eor r6,r6,r9,ror#8
253 and r9,lr,r2
259 ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0]
265 eor r6,r6,r9,ror#16
266 and r9,lr,r3,lsr#16 @ i2
272 ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16]
[all …]
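
aes-armv4.S keeps a single 1KB T-table and synthesizes the other three columns by rotating the looked-up word (the eor ...,ror #8/#16 steps), trading a little ALU work for cache footprint. A sketch of one output column; Te[] stands in for the assumed round table:

#include <stdint.h>

extern const uint32_t Te[256];   /* assumed: standard AES round table */

static uint32_t ror32(uint32_t x, int n)
{
    return (x >> n) | (x << (32 - n));
}

static uint32_t aes_round_column(uint32_t s0, uint32_t s1,
                                 uint32_t s2, uint32_t s3, uint32_t rk)
{
    return  Te[(s0 >> 24) & 0xff]              /* direct lookup      */
          ^ ror32(Te[(s1 >> 16) & 0xff],  8)   /* Te1 via rotation   */
          ^ ror32(Te[(s2 >>  8) & 0xff], 16)   /* Te2 via rotation   */
          ^ ror32(Te[ s3        & 0xff], 24)   /* Te3 via rotation   */
          ^ rk;
}
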
aes-x86_64.S
209 movzbl (%r14,%r9,1),%r9d
380 movq 16(%rsp),%r9
382 movl %eax,0(%r9)
383 movl %ebx,4(%r9)
384 movl %ecx,8(%r9)
385 movl %edx,12(%r9)
607 movzbl (%r14,%r9,1),%r9d
656 movq %rbx,%r9
658 shrq $7,%r9
662 subq %r9,%rbx
[all …]
/external/aac/libFDK/src/arm/
dct_arm.cpp
112 PUSH {r4-r9} in dct_IV_func1()
121 SMULWT r9, r5, r8 // accuX = accu2*val_tw.l in dct_IV_func1()
123 RSB r9, r9, #0 // accuX =-accu2*val_tw.l in dct_IV_func1()
125 SMLAWB r4, r4, r8, r9 // accu1 = accu1*val_tw.h - accu2*val_tw.l in dct_IV_func1()
131 SMULWB r9, r7, r8 // accuX = accu4*val_tw.h in dct_IV_func1()
133 RSB r9, r9, #0 // accuX =-accu4*val_tw.h in dct_IV_func1()
135 SMLAWT r6, r6, r8, r9 // accu3 = accu3*val_tw.l-accu4*val_tw.h in dct_IV_func1()
147 SMULWT r9, r5, r8 // accuX = accu2*val_tw.l in dct_IV_func1()
149 RSB r9, r9, #0 // accuX =-accu2*val_tw.l in dct_IV_func1()
151 SMLAWB r4, r4, r8, r9 // accu1 = accu1*val_tw.h - accu2*val_tw.l in dct_IV_func1()
[all …]
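
SMULWT/SMLAWB multiply a 32-bit accumulator by one 16-bit half of the packed twiddle word and keep the top 32 bits of the 48-bit product; the RSB-then-SMLAW pair realizes accu1*val_tw.h - accu2*val_tw.l. A sketch (which halfword holds .h is assumed from the comments):

#include <stdint.h>

static int32_t smulw(int32_t a, int16_t b)      /* (a * b) >> 16    */
{
    return (int32_t)(((int64_t)a * b) >> 16);
}

static int32_t twiddle_mac(int32_t accu1, int32_t accu2, uint32_t val_tw)
{
    int16_t h = (int16_t)(val_tw >> 16);        /* val_tw.h         */
    int16_t l = (int16_t)(val_tw & 0xffff);     /* val_tw.l         */
    return smulw(accu1, h) - smulw(accu2, l);   /* RSB + SMLAW pair */
}
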
/external/libvpx/libvpx/vp8/common/arm/armv6/
filter_v6.asm
55 ldrb r9, [r0, #-1]
62 pkhbt lr, r8, r9, lsl #16 ; r9 | r8
63 pkhbt r8, r9, r10, lsl #16 ; r10 | r9
65 ldrb r9, [r0]
70 pkhbt r11, r11, r9, lsl #16 ; r9 | r11
79 pkhbt r9, r9, r10, lsl #16 ; r10 | r9
82 smlad lr, r9, r6, lr
91 ldrneb r9, [r0, #-1]
145 ldrb r9, [r0, #-1]
152 pkhbt lr, r8, r9, lsl #16 ; r9 | r8
[all …]
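
pkhbt packs adjacent pixels into halfword pairs so each smlad advances the 6-tap filter by two multiply-accumulates. Scalar form, with the tap window assumed to span src[-2..3] as in VP8's subpel filters:

#include <stdint.h>

static int32_t sixtap(const uint8_t *src, const int16_t coef[6])
{
    int32_t acc = 0;
    for (int k = 0; k < 6; k++)        /* three smlads per output   */
        acc += src[k - 2] * coef[k];
    return acc;
}
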
intra4x4_predict_v6.asm
54 mov r9, #0
57 usad8 r12, r8, r9
83 ldrb r9, [sp, #48] ; top_left
91 add r9, r9, r9, lsl #16 ; [tl|tl]
94 ssub16 r10, r10, r9 ; a[2|0] - [tl|tl]
95 ssub16 r11, r11, r9 ; a[3|1] - [tl|tl]
143 ldrb r9, [sp, #48] ; top_left
151 pkhbt r9, r9, r5, lsl #16 ; a[1|-1]
153 add r9, r9, r4, lsl #1 ;[a[1]+2*a[2] | tl+2*a[0] ]
154 uxtab16 r9, r9, r5 ;[a[1]+2*a[2]+a[3] | tl+2*a[0]+a[1] ]
[all …]
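
The ssub16 lines subtract top_left from the packed "above" samples once up front, so TM prediction reduces to one add and a clamp per pixel: pred = clip(left + above - top_left). Sketch:

#include <stdint.h>

static uint8_t clip255(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static void tm_predict4x4(uint8_t *dst, int stride, const uint8_t *above,
                          const uint8_t *left, uint8_t top_left)
{
    for (int r = 0; r < 4; r++)
        for (int c = 0; c < 4; c++)
            dst[r * stride + c] = clip255(left[r] + above[c] - top_left);
}
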
dequant_idct_v6.asm
65 smulwt r9, r3, r6
69 pkhbt r7, r7, r9, lsl #16
74 smulwb r9, r3, r12
77 pkhbt r9, r9, r11, lsl #16
80 uadd16 r7, r12, r9
85 uadd16 r9, r10, r6
93 str r9, [r1], #4
102 ldr r9, [r0], #4
112 pkhbt lr, r9, r7, lsl #16
115 pkhtb r8, r7, r9, asr #16
[all …]
loopfilter_v6.asm
71 ldr r9, [src], pstep ; p3
90 uqsub8 r6, r9, r10 ; p3 - p2
91 uqsub8 r7, r10, r9 ; p2 - p3
102 ldr r9, [src], pstep ; q0
111 uqsub8 r11, r12, r9 ; p0 - q0
112 uqsub8 r12, r9, r12 ; q0 - p0
119 uqsub8 r7, r9, r10 ; q0 - q1
121 uqsub8 r6, r10, r9 ; q1 - q0
123 uqsub8 r9, r11, r10 ; q2 - q1
130 orr r10, r9, r10 ; abs (q2-q1)
[all …]
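
uqsub8 saturates at zero, so OR-ing the two one-sided differences yields a per-byte absolute difference with no sign handling, four lanes at a time. Per byte:

#include <stdint.h>

static uint8_t abs_diff(uint8_t a, uint8_t b)
{
    uint8_t d1 = a > b ? (uint8_t)(a - b) : 0;   /* uqsub8 a, b     */
    uint8_t d2 = b > a ? (uint8_t)(b - a) : 0;   /* uqsub8 b, a     */
    return d1 | d2;                 /* orr: one side is always zero */
}
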
bilinearfilter_v6.asm
56 ldrb r9, [r0, #3]
63 pkhbt r8, r8, r9, lsl #16 ; src[3] | src[2]
65 pkhbt r9, r9, r10, lsl #16 ; src[4] | src[3]
68 smuad r9, r9, r5
82 add r9, r9, #0x40
84 usat r9, #16, r9, asr #7
89 strh r9, [r1], r3
99 add r9, r2, r4, lsl #1 ; adding back block width
100 pld [r0, r9] ; preload next row
117 ldrb r9, [r0, #3]
[all …]
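
Each smuad computes src[i]*f0 + src[i+1]*f1 in one instruction; the +0x40 / asr #7 pair rounds to nearest in VP8's Q7 filter format, and usat clamps the result. One output, as a sketch:

#include <stdint.h>

static uint16_t bilinear_tap(const uint8_t *src, const int16_t f[2])
{
    int32_t v = src[0] * f[0] + src[1] * f[1];    /* smuad          */
    v = (v + 0x40) >> 7;                          /* round, Q7      */
    return v < 0 ? 0 : v > 0xffff ? 0xffff : (uint16_t)v; /* usat   */
}
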
simpleloopfilter_v6.asm
69 mov r9, #4 ; double the count. we're doing 4 at a time
173 pkhbt r9, r3, r4, lsl #16
176 ;transpose r7, r8, r9, r10 to r3, r4, r5, r6
177 TRANSPOSE_MATRIX r7, r8, r9, r10, r3, r4, r5, r6
181 uqsub8 r9, r4, r5 ; p0 - q0
184 orr r9, r9, r10 ; abs(p0 - q0)
186 uqadd8 r9, r9, r9 ; abs(p0 - q0) * 2
188 uqadd8 r7, r7, r9 ; abs(p0 - q0)*2 + abs(p1 - q1)/2
207 ldr r9, c0x03030303 ; r9 = 3
216 qadd8 r9 , r3 , r9 ; Filter2 = vp8_filter + 3
[all …]
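
The saturating uqadd8/uqsub8 arithmetic builds the filter/skip mask per byte: filter only while abs(p0-q0)*2 + abs(p1-q1)/2 stays within the threshold. Scalar sketch, threshold named flimit after the comments:

#include <stdint.h>

static int simple_filter_mask(uint8_t flimit, uint8_t p1, uint8_t p0,
                              uint8_t q0, uint8_t q1)
{
    int a0 = p0 > q0 ? p0 - q0 : q0 - p0;   /* abs(p0 - q0)         */
    int a1 = p1 > q1 ? p1 - q1 : q1 - p1;   /* abs(p1 - q1)         */
    return a0 * 2 + a1 / 2 <= flimit;       /* uqadd8 doubles a0    */
}
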
iwalsh_v6.asm
31 ldr r9, [r0, #28] ; [15 | 14]
43 qadd16 r10, r3, r9 ; a1 [3+15 | 2+14]
46 qsub16 lr, r3, r9 ; d1 [3-15 | 2-14]
51 qsub16 r9, lr, r12 ; d1 - c1 [15 | 14]
95 qsubaddx r4, r8, r9 ; [c1|a1] [13-14 | 12+15]
96 qaddsubx r5, r8, r9 ; [b1|d1] [13+14 | 12-15]
101 qaddsubx r9, r5, r4 ; [a2|d2] [b1+a1 | d1-c1]
106 qadd16 r9, r9, r10 ; [a2+3|d2+3]
119 asr r12, r9, #19 ; [12]
124 sxth r9, r9
[all …]
idct_v6.asm
42 smulbt r9, r5, r6 ; (ip[5] * cospi8sqrt2minus1) >> 16
48 pkhtb r7, r9, r7, asr #16 ; 5c | 4c
53 smulbb r9, r5, r12 ; (ip[12] * cospi8sqrt2minus1) >> 16
58 pkhtb r9, r11, r9, asr #16 ; 13c | 12c
61 uadd16 r7, r12, r9 ; 13c+13 | 12c+12
68 uadd16 r9, r10, r6 ; a+d
77 str r9, [r0], #4 ; o1 | o0
91 smulbt r9, r5, r6 ; (ip[5] * cospi8sqrt2minus1) >> 16
97 pkhtb r7, r7, r9, asr #16 ; 1c | 5c
102 pkhbt r9, r14, r12, lsl #16 ; i2 | i6
[all …]
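
smulbt/smulbb multiply by cospi8sqrt2minus1 and keep the high half; adding x back (the uadd16 "13c+13 | 12c+12" step) reconstructs x*sqrt(2)*cos(pi/8), since storing the constant minus one keeps it within 16 bits. A sketch using VP8's reference constants 20091 and 35468:

#include <stdint.h>

static int32_t mul_cospi8sqrt2(int32_t x)
{
    return x + ((x * 20091) >> 16);   /* smulb? high half + uadd16  */
}

static int32_t mul_sinpi8sqrt2(int32_t x)
{
    return (x * 35468) >> 16;
}
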
/external/valgrind/main/coregrind/m_syswrap/
syscall-s390x-linux.S
97 stmg %r2,%r9, SP_R2(%r15)
108 lg %r9, SP_R3(%r15) /* guest state --> r9 */
109 lg %r2, OFFSET_s390x_r2(%r9) /* guest r2 --> real r2 */
110 lg %r3, OFFSET_s390x_r3(%r9) /* guest r3 --> real r3 */
111 lg %r4, OFFSET_s390x_r4(%r9) /* guest r4 --> real r4 */
112 lg %r5, OFFSET_s390x_r5(%r9) /* guest r5 --> real r5 */
113 lg %r6, OFFSET_s390x_r6(%r9) /* guest r6 --> real r6 */
114 lg %r7, OFFSET_s390x_r7(%r9) /* guest r7 --> real r7 */
120 stg %r2, OFFSET_s390x_r2(%r9)
135 lmg %r6,%r9, SP_R6(%r15)
[all …]
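
The lg/stg pairs marshal syscall arguments out of the saved guest state into the real registers, issue the syscall, and store the real r2 back as the guest's return value. In C terms (struct layout and names illustrative, not Valgrind's actual definitions):

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

struct guest_s390x { long r2, r3, r4, r5, r6, r7; };  /* hypothetical */

static long do_syscall_for_guest(long sysno, struct guest_s390x *g)
{
    long ret = syscall(sysno, g->r2, g->r3, g->r4, g->r5, g->r6, g->r7);
    g->r2 = ret;              /* stg %r2, OFFSET_s390x_r2(%r9)        */
    return ret;
}
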
/external/llvm/test/MC/ARM/
basic-arm-instructions.s
62 adc r6, r7, r8, lsl r9
63 adc r6, r7, r8, lsr r9
64 adc r6, r7, r8, asr r9
65 adc r6, r7, r8, ror r9
81 adc r6, r7, lsl r9
82 adc r6, r7, lsr r9
83 adc r6, r7, asr r9
84 adc r6, r7, ror r9
100 @ CHECK: adc r6, r7, r8, lsl r9 @ encoding: [0x18,0x69,0xa7,0xe0]
101 @ CHECK: adc r6, r7, r8, lsr r9 @ encoding: [0x38,0x69,0xa7,0xe0]
[all …]
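
These MC tests exercise register-shifted operands: adc r6, r7, r8, lsl r9 shifts r8 by the low byte of r9 before the add-with-carry. Roughly:

#include <stdint.h>

static uint32_t adc_lsl(uint32_t rn, uint32_t rm, uint32_t rs, int carry)
{
    uint32_t sh = rs & 0xff;                 /* ARM uses Rs's low byte */
    uint32_t op2 = (sh >= 32) ? 0 : (rm << sh);
    return rn + op2 + (carry ? 1u : 0u);
}
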
/external/libvpx/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_copyframe_func_neon.asm
32 ldr r9, [r1, #yv12_buffer_config_u_buffer] ;srcptr1
44 str r9, [sp, #4]
53 mov r9, r3
71 vst1.8 {q0, q1}, [r9]!
73 vst1.8 {q2, q3}, [r9]!
75 vst1.8 {q4, q5}, [r9]!
77 vst1.8 {q6, q7}, [r9]!
113 mov r9, r3
127 vst1.8 {q0, q1}, [r9]!
129 vst1.8 {q2, q3}, [r9]!
[all …]
/external/v8/src/arm/
codegen-arm.cc
91 r9, in GenerateSmiOnlyToObject()
126 __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS); in GenerateSmiOnlyToDouble()
129 __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex); in GenerateSmiOnlyToDouble()
131 __ str(r9, MemOperand(r6, HeapObject::kMapOffset)); in GenerateSmiOnlyToDouble()
138 r9, in GenerateSmiOnlyToDouble()
149 r9, in GenerateSmiOnlyToDouble()
175 r9, in GenerateSmiOnlyToDouble()
189 __ ldr(r9, MemOperand(r3, 4, PostIndex)); in GenerateSmiOnlyToDouble()
191 __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole); in GenerateSmiOnlyToDouble()
196 __ vmov(s0, r9); in GenerateSmiOnlyToDouble()
[all …]
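
On 32-bit ARM, V8 tags a small integer ("smi") by storing value << 1 with bit 0 clear; UntagAndJumpIfNotSmi undoes that before vmov/vcvt widens the element to double. A sketch of the untagging check:

#include <stdint.h>

static int untag_smi(int32_t raw, int32_t *out)
{
    if (raw & 1) return 0;   /* heap object (e.g. the hole) -> bail  */
    *out = raw >> 1;         /* arithmetic shift recovers the value  */
    return 1;
}
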
/external/libvpx/libvpx/vp8/encoder/arm/armv5te/
boolhuff_armv5te.asm
98 mov r9, #0
99 strb r9, [r7, r4] ; w->buffer[x] =(unsigned char)0
109 ldrb r9, [r7, r4] ; w->buffer[x]
110 add r9, r9, #1
111 strb r9, [r7, r4] ; w->buffer[x] + 1
114 ldr r9, [r0, #vp8_writer_buffer]
124 VALIDATE_POS r9, r1 ; validate_buffer at pos
126 strb r7, [r9, r4] ; w->buffer[w->pos++]
174 mov r9, #0
175 strb r9, [r7, r4] ; w->buffer[x] =(unsigned char)0
[all …]
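
The strb/add/strb sequence is the boolean coder's carry propagation: a carry out of the range turns trailing 0xff bytes into 0x00 and increments the byte before them, matching the shape of the C reference coder. Sketch:

#include <stdint.h>

static void propagate_carry(uint8_t *buffer, int x)
{
    while (buffer[x] == 0xff) {
        buffer[x] = 0;       /* strb r9, [r7, r4] with r9 = 0        */
        x--;
    }
    buffer[x]++;             /* ldrb / add #1 / strb                 */
}
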
