/external/tremolo/Tremolo/
D | mdctARM.s |
    186 STMFD r13!,{r4,r6-r11,r14}
    196 LDR r11,[r9],#4 @ r11= *wL++
    201 SMULL r14,r11,r12,r11 @ (r14,r11) = *l * *wL++
    203 ADD r6, r6, r11
    214 LDMFD r13!,{r4,r6-r11,PC}
    225 STMFD r13!,{r4,r6-r11,r14}
    235 LDR r11,[r9],#4 @ r11= *wL++
    240 SMULL r14,r11,r12,r11 @ (r14,r11) = *l * *wL++
    242 SUB r6, r6, r11
    253 LDMFD r13!,{r4,r6-r11,PC}
    [all …]
D | mdctLARM.s |
    186 STMFD r13!,{r4,r6-r11,r14}
    198 LDRB r11,[r9],#1 @ r11= *wL++
    202 MUL r11,r12,r11 @ r11 = *l * *wL++
    204 MLA r6, r7, r6, r11 @ r6 = *--r * *--wR
    215 LDMFD r13!,{r4,r6-r11,PC}
    226 STMFD r13!,{r4,r6-r11,r14}
    237 LDRB r11,[r9],#1 @ r11= *wL++
    242 MUL r11,r12,r11 @ r11 = *l * *wL++
    245 SUB r6, r6, r11
    256 LDMFD r13!,{r4,r6-r11,PC}
    [all …]
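Both mdct entries above are the same windowed overlap loop: the high-quality build (mdctARM.s) does a 32x32->64 multiply with SMULL and keeps the high word, while the low-accuracy build (mdctLARM.s) loads 8-bit window values with LDRB and uses a plain MUL. A minimal C sketch of the SMULL variant, using illustrative names (out, l, wL) rather than Tremolo's actual ones:

    #include <stdint.h>

    /* Accumulate *l * *wL++ per sample, keeping the top 32 bits of the
     * 64-bit product, as SMULL r14,r11,r12,r11 / ADD r6,r6,r11 does. */
    static void overlap_add(int32_t *out, const int32_t *l,
                            const int32_t *wL, int n)
    {
        for (int i = 0; i < n; i++) {
            int64_t p = (int64_t)l[i] * wL[i];  /* SMULL */
            out[i] += (int32_t)(p >> 32);       /* ADD high word */
        }
    }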
D | bitwiseARM.s |
    45 STMFD r13!,{r10,r11,r14}
    56 LDRLT r11,[r3,#4]! @ r11= ptr[1]
    60 ORRLT r10,r10,r11,LSL r14 @ r10= Next 32 bits.
    64 LDMFD r13!,{r10,r11,PC}
    80 MOV r11,#1
    83 RSB r11,r11,r11,LSL r5 @ r11= mask
    84 AND r10,r10,r11 @ r10= first r5 bits
    88 LDR r11,[r0,#12] @ r11= head = b->head
    92 LDR r11,[r11,#12] @ r11= head = head->next
    95 CMP r11,#0
    [all …]
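The idiom worth noting is at lines 80-83: MOV r11,#1 followed by RSB r11,r11,r11,LSL r5 builds the mask (1 << n) - 1 in two instructions, with no constant-pool load. A sketch of the extraction it feeds, assuming a 32-bit bit reservoir already holds the next bits (names are illustrative):

    #include <stdint.h>

    static uint32_t peek_bits(uint32_t reservoir, int n)  /* 0 < n < 32 */
    {
        uint32_t mask = (1u << n) - 1u;  /* RSB r11,r11,r11,LSL r5 */
        return reservoir & mask;         /* AND r10,r10,r11        */
    }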
D | dpen.s |
    234 STMFD r13!,{r4-r11,r14}
    258 LDR r11,[r4,#28] @ r11= s->q_minp
    261 SUBS r11,r7, r11 @ r11= add = point - s->q_minp
    263 MOVGT r14,r14,ASR r11 @ r14= add = s->q_min >> add (if add >0)
    264 RSBLT r11,r11,#0
    265 MOVLT r14,r14,LSL r11 @ r14= add = s->q_min << -add (if add < 0)
    294 MOV r11,#0 @ r11= prev = 0
    300 ADD r1, r11,r1, ASR r5 @ r1 = v = prev+((add+mul*v)>>shiftM)
    301 AND r11,r1, r7 @ r11= prev = seqMask & v
    306 LDMFD r13!,{r4-r11,PC}
    [all …]
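Lines 261-265 pick the shift direction at run time: add = point - s->q_minp, then q_min is shifted right when add is positive and left when it is negative. A C sketch under the same assumptions the comments state:

    #include <stdint.h>

    static int32_t scale_add(int32_t q_min, int point, int q_minp)
    {
        int add = point - q_minp;           /* SUBS r11,r7,r11          */
        if (add > 0) return q_min >> add;   /* MOVGT r14,r14,ASR r11    */
        if (add < 0) return q_min << -add;  /* RSBLT; MOVLT ... LSL r11 */
        return q_min;
    }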
D | floor1ARM.s |
    48 STMFD r13!,{r4-r6,r11,r14}
    49 LDMFD r12,{r11,r12,r14} @ r11 = err
    55 SUBS r11,r11,r14 @ err -= ady
    56 ADDLT r11,r11,r12 @ if (err < 0) err+=adx
    65 LDMFD r13!,{r4-r6,r11,PC}
D | floor1LARM.s |
    48 STMFD r13!,{r4-r6,r11,r14}
    49 LDMFD r12,{r11,r12,r14} @ r11 = err
    55 SUBS r11,r11,r14 @ err -= ady
    58 ADDLT r11,r11,r12 @ if (err < 0) err+=adx
    64 LDMFD r13!,{r4-r6,r11,PC}
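Both floor1 variants step along a line with the Bresenham error term the comments describe: err -= ady each x, and when err goes negative, err += adx and y takes one step. A sketch of that loop, with illustrative names (vec, sy) and an assumed zero initial error:

    static void render_line(int x0, int x1, int y, int adx, int ady,
                            int sy, int *vec)
    {
        int err = 0;
        for (int x = x0; x < x1; x++) {
            vec[x] = y;
            err -= ady;         /* SUBS r11,r11,r14  */
            if (err < 0) {      /* ADDLT r11,r11,r12 */
                err += adx;
                y += sy;
            }
        }
    }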
/external/libvpx/vp8/common/arm/armv6/
D | filter_v6.asm |
    30 stmdb sp!, {r4 - r11, lr}
    32 ldr r11, [sp, #40] ; vp8_filter address
    48 ldr r4, [r11] ; load up packed filter coefficients
    49 ldr r5, [r11, #4]
    50 ldr r6, [r11, #8]
    63 ldrb r11, [r0, #-1]
    71 pkhbt r10, r10, r11, lsl #16 ; r11 | r10
    73 pkhbt r11, r11, r9, lsl #16 ; r9 | r11
    77 smlad r8, r11, r5, r8
    78 ldrb r11, [r0, #2]
    [all …]
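smlad is the workhorse here: it multiplies both 16-bit halves of two packed registers and adds both products to the accumulator, so each instruction retires two filter taps; the pkhbt lines pack neighbouring pixels into one register first. Scalar equivalent of one smlad:

    #include <stdint.h>

    static int32_t smlad_c(uint32_t x, uint32_t c, int32_t acc)
    {
        int16_t xl = (int16_t)(x & 0xffff), xh = (int16_t)(x >> 16);
        int16_t cl = (int16_t)(c & 0xffff), ch = (int16_t)(c >> 16);
        return acc + xl * cl + xh * ch;  /* smlad r8,r11,r5,r8 */
    }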
D | loopfilter_v6.asm |
    67 stmdb sp!, {r4 - r11, lr}
    78 ldr r11, [src], pstep ; p1
    91 uqsub8 r8, r10, r11 ; p2 - p1
    92 uqsub8 r10, r11, r10 ; p1 - p2
    98 uqsub8 r6, r11, r12 ; p1 - p0
    100 uqsub8 r7, r12, r11 ; p0 - p1
    108 uqsub8 r6, r11, r10 ; p1 - q1
    109 uqsub8 r7, r10, r11 ; q1 - p1
    110 uqsub8 r11, r12, r9 ; p0 - q0
    114 orr r12, r11, r12 ; abs (p0-q0)
    [all …]
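uqsub8 subtracts bytes with unsigned saturation, so each direction clamps to zero and OR-ing the two directions yields a per-byte absolute difference (lines 91-92 and 110-114), the core of the filter-mask computation. The same trick for one packed word in C:

    #include <stdint.h>

    static uint32_t abs_diff_u8x4(uint32_t a, uint32_t b)
    {
        uint32_t r = 0;
        for (int i = 0; i < 32; i += 8) {
            uint32_t x = (a >> i) & 0xff, y = (b >> i) & 0xff;
            uint32_t d = (x > y) ? x - y : y - x;  /* uqsub8 both ways, orr */
            r |= d << i;
        }
        return r;
    }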
D | idct_v6.asm |
    12 ; r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r14
    58 stmdb sp!, {r4-r11, lr} ; backup registers 1 backup
    72 …smulwb r11, r4, r3 ; ([12] * cospi8sqrt2minus1) >> 16 1, r11 un 2, r3/r4 ^1 …
    75 add r11, r3, r11 ; temp2 1
    76 rsb r11, r11, r10 ; c1 = temp1 - temp2 1 c1
    79 add r8, r7, r11 ; b1 + c1 1 b+c
    81 sub r7, r7, r11 ; b1 - c1 1 b-c
    99 ldrsh r11, [r1, #2] ; input[1] 1, r11 un 2 [1]
    103 …smulwb r9, r5, r11 ; ([1] * sinpi8sqrt2) >> 16 1, r9 un 2, r5/r11 ^1 …
    110 …smulwb r10, r4, r11 ; ([1] * cospi8sqrt2minus1) >> 16 1, r10 un 2, r4/r11 ^1 …
    [all …]
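smulwb multiplies a 32-bit operand by the bottom 16-bit halfword of another and keeps the top 32 bits of the 48-bit product, which is exactly the "([1] * sinpi8sqrt2) >> 16" fixed-point step in the comments at lines 103 and 110. In C:

    #include <stdint.h>

    static int32_t smulwb_c(int32_t x, uint32_t y)
    {
        /* (x * (signed bottom halfword of y)) >> 16 */
        return (int32_t)(((int64_t)x * (int16_t)(y & 0xffff)) >> 16);
    }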
D | bilinearfilter_v6.asm |
    27 stmdb sp!, {r4 - r11, lr}
    29 ldr r11, [sp, #40] ; vp8_filter address
    39 ldr r5, [r11] ; load up filter coefficients
    44 mov r11, r1 ; save output_ptr for each row
    103 add r11, r11, #2 ; move over to next column
    104 mov r1, r11
    108 ldmia sp!, {r4 - r11, pc}
    131 add r11, r11, #2 ; move over to next column
    132 mov r1, r11
    136 ldmia sp!, {r4 - r11, pc}
    [all …]
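The two-tap filter this file applies is VP8's bilinear predictor, whose coefficient pairs sum to 128, so each output is rounded and shifted by 7. A scalar sketch of one tap pair, assuming that coefficient convention:

    static unsigned char bilinear_tap(unsigned char a, unsigned char b,
                                      int f0, int f1)  /* f0 + f1 == 128 */
    {
        int v = (a * f0 + b * f1 + 64) >> 7;  /* round, then shift */
        return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
    }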
D | sixtappredict8x4_v6.asm |
    27 stmdb sp!, {r4 - r11, lr}
    65 smuad r11, r6, r3 ; vp8_filter[0], vp8_filter[1]
    70 smlad r11, r8, r4, r11 ; vp8_filter[2], vp8_filter[3]
    75 pkhbt r6, r6, r7, lsl #16 ; r11 | r10
    76 smlad r11, r10, r5, r11 ; vp8_filter[4], vp8_filter[5]
    81 add r11, r11, #0x40 ; round_shift_and_clamp
    83 usat r11, #8, r11, asr #7
    85 strh r11, [lr], #20 ; result is transposed and stored, which
    90 movne r11, r6
    96 movne r9, r11
    [all …]
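Lines 81-83 spell out round_shift_and_clamp: add the rounding constant 0x40, shift right by 7, and saturate to 8 bits; usat r11,#8,r11,asr #7 does the shift and the clamp in one instruction. C equivalent:

    static unsigned char round_shift_and_clamp(int acc)
    {
        int v = (acc + 0x40) >> 7;  /* add #0x40 ; asr #7    */
        if (v < 0) v = 0;           /* usat clamps to 0..255 */
        else if (v > 255) v = 255;
        return (unsigned char)v;
    }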
D | iwalsh_v6.asm |
    23 stmdb sp!, {r4 - r11, lr}
    35 qadd16 r11, r4, r6 ; b1 [5+9 | 4+8]
    39 qadd16 r2, r10, r11 ; a1 + b1 [1 | 0]
    41 qsub16 r6, r10, r11 ; a1 - b1 [9 | 8]
    45 qadd16 r11, r5, r7 ; b1 [7+11 | 6+10]
    49 qadd16 r3, r10, r11 ; a1 + b1 [3 | 2]
    51 qsub16 r7, r10, r11 ; a1 - b1 [11 | 10]
    57 qaddsubx r11, r2, r3 ; [b1|d1] [1+2 | 0-3]
    61 qaddsubx r2, r10, r11 ; [b2|c2] [c1+d1 | a1-b1]
    62 qaddsubx r3, r11, r10 ; [a2|d2] [b1+a1 | d1-c1]
    [all …]
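The qadd16/qsub16 pairs run VP8's inverse Walsh-Hadamard butterfly on two 16-bit coefficients per register. One column of the butterfly in scalar form, with the index layout assumed from the lane comments (4x4 coefficients, stride 4):

    static void iwalsh_col(const short *ip, short *op)
    {
        int a1 = ip[0] + ip[12];  /* qadd16: a1 */
        int b1 = ip[4] + ip[8];   /* qadd16: b1 */
        int c1 = ip[4] - ip[8];   /* qsub16: c1 */
        int d1 = ip[0] - ip[12];  /* qsub16: d1 */
        op[0]  = (short)(a1 + b1);
        op[4]  = (short)(c1 + d1);
        op[8]  = (short)(a1 - b1);
        op[12] = (short)(d1 - c1);
    }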
/external/openssl/crypto/sha/asm/
D | sha512-armv4.s |
    61 ldr r11, [r0,#56+4]
    66 str r11, [sp,#56+0]
    74 ldr r11, [r0,#24+4]
    80 str r11, [sp,#24+0]
    91 ldrb r11, [r1,#4]
    98 orr r3,r3,r11,lsl#24
    104 ldr r11,[sp,#56+0] @ h.lo
    123 adds r3,r3,r11
    128 ldr r11,[sp,#48+0] @ g.lo
    135 eor r9,r9,r11
    [all …]
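ARMv4 has no 64-bit registers, so sha512-armv4.s keeps every SHA-512 word as a lo/hi pair spilled to the stack (hence the #56+0 / #56+4 offsets); adds produces the low word and its carry feeds an adc for the high word. The pattern in C, assuming a two-word representation:

    #include <stdint.h>

    typedef struct { uint32_t lo, hi; } u64p;

    static u64p add64(u64p a, u64p b)
    {
        u64p r;
        r.lo = a.lo + b.lo;                  /* adds r3,r3,r11         */
        r.hi = a.hi + b.hi + (r.lo < a.lo);  /* adc picks up the carry */
        return r;
    }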
D | sha1-armv4-large.s |
    21 ldrb r11,[r1,#-2]
    26 orr r9,r11,r9,lsl#8
    36 ldrb r11,[r1,#-2]
    41 orr r9,r11,r9,lsl#8
    51 ldrb r11,[r1,#-2]
    56 orr r9,r11,r9,lsl#8
    66 ldrb r11,[r1,#-2]
    71 orr r9,r11,r9,lsl#8
    81 ldrb r11,[r1,#-2]
    86 orr r9,r11,r9,lsl#8
    [all …]
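The unrolled ldrb/orr pairs assemble big-endian 32-bit words byte by byte (ARMv4 predates rev): each orr r9,r11,r9,lsl#8 shifts the word up eight bits and merges the next byte. In C:

    #include <stdint.h>

    static uint32_t load_be32(const unsigned char *p)
    {
        uint32_t w = 0;
        for (int i = 0; i < 4; i++)
            w = (w << 8) | p[i];  /* orr r9,r11,r9,lsl#8 */
        return w;
    }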
D | sha256-armv4.s |
    31 ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
    53 add r3,r3,r11
    55 mov r11,r4,ror#2
    56 eor r11,r11,r4,ror#13
    57 eor r11,r11,r4,ror#22 @ Sigma0(a)
    62 add r11,r11,r0
    64 add r11,r11,r3
    85 mov r10,r11,ror#2
    86 eor r10,r10,r11,ror#13
    87 eor r10,r10,r11,ror#22 @ Sigma0(a)
    [all …]
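Lines 55-57 are SHA-256's Sigma0(a) = ROTR2(a) ^ ROTR13(a) ^ ROTR22(a), done with one mov and two eors because the barrel shifter rotates for free. C equivalent:

    #include <stdint.h>

    static uint32_t rotr(uint32_t x, int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static uint32_t Sigma0(uint32_t a)
    {
        return rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22);  /* ror#2/#13/#22 */
    }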
/external/libvpx/vp8/encoder/arm/armv6/
D | walsh_v6.asm |
    22 stmdb sp!, {r4 - r11, lr}
    35 qaddsubx r11, r2, r3 ; [b1|d1] [1+2 | 0-3]
    39 qaddsubx r2, r10, r11 ; [1 | 2] [c1+d1 | a1-b1]
    40 qaddsubx r3, r11, r10 ; [0 | 3] [b1+a1 | d1-c1]
    45 qaddsubx r11, r6, r7 ; [b1|d1] [9+10 | 8-11]
    49 qaddsubx r6, r10, r11 ; [9 |10] [c1+d1 | a1-b1]
    50 qaddsubx r7, r11, r10 ; [8 |11] [b1+a1 | d1-c1]
    57 qadd16 r11, r5, r7 ; b1 [4+8 | 7+11]
    61 qadd16 r3, r10, r11 ; a2 [a1+b1] [0 | 3]
    63 qsub16 r7, r10, r11 ; c2 [a1-b1] [8 |11]
    [all …]
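qaddsubx (QASX) adds the exchanged halfwords in one step: the result's top half is a.hi + b.lo and its bottom half is a.lo - b.hi, which is how the forward Walsh transform gets its "[1+2 | 0-3]" pair from a single instruction at line 35. A scalar sketch (the instruction's signed saturation is omitted for brevity):

    #include <stdint.h>

    static uint32_t qasx_c(uint32_t a, uint32_t b)
    {
        int hi = (int16_t)(a >> 16) + (int16_t)(b & 0xffff);  /* 1+2 */
        int lo = (int16_t)(a & 0xffff) - (int16_t)(b >> 16);  /* 0-3 */
        return ((uint32_t)(uint16_t)hi << 16) | (uint16_t)lo;
    }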
/external/libffi/src/s390/
D | sysv.S |
    52 lr %r11,%r15 # Set up frame pointer
    59 l %r7,96(%r11) # Load function address
    60 st %r11,0(%r15) # Set up back chain
    61 ahi %r11,-48 # Register save area
    68 lm %r2,%r6,0(%r11) # Load arguments
    69 ld %f0,32(%r11)
    70 ld %f2,40(%r11)
    75 l %r4,48+56(%r11)
    76 lm %r6,%r15,48+24(%r11)
    80 l %r4,48+56(%r11)
    [all …]
/external/libvpx/vpx_scale/arm/neon/
D | vp8_vpxyv12_copyframe_func_neon.asm |
    24 push {r4 - r11, lr}
    33 ldr r11, [r1, #yv12_buffer_config_v_buffer] ;srcptr1
    45 str r11, [sp, #12]
    54 add r11, r3, r7
    70 vst1.8 {q8, q9}, [r11]!
    72 vst1.8 {q10, q11}, [r11]!
    74 vst1.8 {q12, q13}, [r11]!
    76 vst1.8 {q14, q15}, [r11]!
    87 sub r11, r5, r10
    112 add r11, r3, r7
    [all …]
D | vp8_vpxyv12_copysrcframe_func_neon.asm |
    27 push {r4 - r11, lr}
    39 add r11, r3, r7 ;second row dst
    63 vst1.8 {q4, q5}, [r11]!
    65 vst1.8 {q6, q7}, [r11]!
    67 vst1.8 {q12, q13}, [r11]!
    69 vst1.8 {q14, q15}, [r11]!
    81 vst1.8 {d1}, [r11]!
    92 strb r8, [r11], #1
    100 add r11, r11, r7
    150 add r11, r3, r7 ;second row dst
    [all …]
D | vp8_vpxyv12_copyframeyonly_neon.asm |
    27 push {r4 - r11, lr}
    44 add r11, r3, r7
    60 vst1.8 {q8, q9}, [r11]!
    62 vst1.8 {q10, q11}, [r11]!
    64 vst1.8 {q12, q13}, [r11]!
    66 vst1.8 {q14, q15}, [r11]!
    77 sub r11, r5, r10
    195 pop {r4 - r11, pc}
    325 pop {r4 - r11, pc}
    365 add r2, r2, r11
    [all …]
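All three copy routines above stream four q-register pairs per iteration (vst1.8 {q8,q9} ... {q14,q15}), moving 64 bytes per store group and writing two destination rows in parallel (r3 and r11 = r3 + stride). The structure in C, with memcpy standing in for the vld1/vst1 pairs, assuming equal source and destination strides and an even row count:

    #include <string.h>

    static void copy_plane(unsigned char *dst, const unsigned char *src,
                           int stride, int rows, int width)  /* rows even */
    {
        for (int r = 0; r < rows; r += 2) {  /* two rows per pass */
            memcpy(dst + r * stride, src + r * stride, width);
            memcpy(dst + (r + 1) * stride, src + (r + 1) * stride, width);
        }
    }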
/external/libvpx/vp8/decoder/arm/armv6/
D | dequant_idct_v6.asm |
    24 stmdb sp!, {r4-r11, lr}
    73 smulwt r11, r3, r12
    80 pkhbt r9, r9, r11, lsl #16
    81 ldr r11, [r0], #4
    86 uadd16 r10, r11, r14
    87 usub16 r8, r11, r14
    110 pkhbt r11, r8, r6, lsl #16
    116 uadd16 r10, r11, lr
    117 usub16 lr, r11, lr
    122 smulwt r11, r4, r8
    [all …]
D | dequant_dc_idct_v6.asm |
    27 stmdb sp!, {r4-r11, lr}
    95 smulwt r11, r3, r12
    102 pkhbt r9, r9, r11, lsl #16
    103 ldr r11, [r0], #4
    108 uadd16 r10, r11, r14
    109 usub16 r8, r11, r14
    132 pkhbt r11, r8, r6, lsl #16
    138 uadd16 r10, r11, lr
    139 usub16 lr, r11, lr
    144 smulwt r11, r4, r8
    [all …]
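Both dequant IDCT files lean on uadd16/usub16, which form the sum and difference of packed halfword pairs with no cross-lane carry, one butterfly stage per instruction pair (e.g. lines 86-87). Scalar equivalent of the packed add:

    #include <stdint.h>

    static uint32_t uadd16_c(uint32_t a, uint32_t b)
    {
        uint32_t lo = (a + b) & 0xffffu;  /* low lane, mod 2^16, no carry out */
        uint32_t hi = ((a >> 16) + (b >> 16)) & 0xffffu;
        return (hi << 16) | lo;
    }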
/external/openssl/crypto/aes/asm/
D | aes-armv4.s |
    119 mov r11,r2
    193 ldr r4,[r11],#16
    194 ldr r5,[r11,#-12]
    195 ldr r6,[r11,#-8]
    196 ldr r7,[r11,#-4]
    197 ldr r12,[r11,#240-16]
    254 ldr r4,[r11],#16
    255 ldr r5,[r11,#-12]
    256 ldr r6,[r11,#-8]
    257 ldr r7,[r11,#-4]
    [all …]
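The load pattern walks the AES key schedule one round at a time: ldr r4,[r11],#16 advances the pointer a full round, the #-12/#-8/#-4 offsets fetch the remaining three words, and [r11,#240-16] reads the round count stored after the 240-byte schedule in OpenSSL's AES_KEY. The same access pattern in C (a flat array of 4-byte round-key words assumed):

    #include <stdint.h>

    static void next_round_key(const uint32_t **rk, uint32_t k[4])
    {
        const uint32_t *p = *rk;
        k[0] = p[0]; k[1] = p[1];  /* ldr r4..r7          */
        k[2] = p[2]; k[3] = p[3];
        *rk = p + 4;               /* post-index by #16   */
    }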
/external/openssl/crypto/bn/asm/
D | armv4-mont.s |
    31 umull r10,r11,r5,r2 @ ap[0]*bp[0]
    40 mov r10,r11
    41 mov r11,#0
    42 umlal r10,r11,r5,r2 @ ap[j]*bp[0]
    52 adds r12,r12,r11
    70 mov r11,#0
    71 umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
    80 adds r10,r11,r7 @ +=tp[j]
    81 mov r11,#0
    82 umlal r10,r11,r5,r2 @ ap[j]*bp[i]
    [all …]
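umull/umlal implement the classic word-serial Montgomery multiply: each inner-loop step accumulates ap[j]*bp[i] plus the running value into a 64-bit (r11:r10) pair, stores the low word, and carries the high word into the next limb. A sketch of one multiply-accumulate row (the Montgomery reduction step that follows in the real code is omitted):

    #include <stdint.h>

    static void mul_add_row(uint32_t *tp, const uint32_t *ap,
                            uint32_t b, int n)
    {
        uint64_t acc = 0;                        /* (r11:r10) pair          */
        for (int j = 0; j < n; j++) {
            acc += (uint64_t)ap[j] * b + tp[j];  /* umlal + adds            */
            tp[j] = (uint32_t)acc;
            acc >>= 32;                          /* mov r10,r11; mov r11,#0 */
        }
        tp[n] = (uint32_t)acc;
    }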
/external/libvpx/vp8/encoder/arm/armv5te/
D | vp8_packtokens_partitions_armv5.asm |
    31 push {r4-r11, lr}
    59 ldr r11, _VP8_COMP_bc2_ ; load up vp8_writer out of cpi
    60 add r0, r0, r11
    62 mov r11, #0
    63 str r11, [sp, #28] ; i
    162 ldrb r11, [r7, r4]
    163 cmpge r11, #0xff
    177 add r11, r4, #1 ; w->pos++
    179 str r11, [r0, #vp8_writer_pos]
    258 ldrb r11, [r7, r4]
    [all …]
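Lines 162-163 are the boolean encoder's carry propagation: when the range coder carries out of the current byte, it walks backwards while buffer bytes equal 0xff, zeroing them, then increments the first byte that can absorb the carry. A sketch of that loop (it assumes an earlier byte below 0xff exists, as the encoder guarantees):

    static void propagate_carry(unsigned char *buf, int pos)
    {
        int x = pos - 1;
        while (buf[x] == 0xff) {  /* ldrb r11,[r7,r4] ; cmpge r11,#0xff */
            buf[x] = 0;
            x--;
        }
        buf[x] += 1;
    }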