/external/libvpx/vp8/common/arm/armv6/
D | filter_v6.asm
     30  stmdb sp!, {r4 - r11, lr}
     32  ldr r11, [sp, #40]            ; vp8_filter address
     48  ldr r4, [r11]                 ; load up packed filter coefficients
     49  ldr r5, [r11, #4]
     50  ldr r6, [r11, #8]
     63  ldrb r11, [r0, #-1]
     71  pkhbt r10, r10, r11, lsl #16  ; r11 | r10
     73  pkhbt r11, r11, r9, lsl #16   ; r9 | r11
     77  smlad r8, r11, r5, r8
     78  ldrb r11, [r0, #2]
         [all …]
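Across these vp8 ARMv6 filters, pkhbt packs two 16-bit samples into one 32-bit register and smlad/smuad then multiply both halves by the matching packed filter taps and accumulate. A minimal scalar sketch of one such dual multiply-accumulate step (names are illustrative, not libvpx's API):

    #include <stdint.h>

    /* Scalar equivalent of one pkhbt + smlad step: two 16-bit samples and two
     * 16-bit taps are packed into 32-bit words; smlad computes
     * acc + lo(a)*lo(b) + hi(a)*hi(b) in a single instruction. */
    static int32_t smlad_equiv(uint32_t packed_samples, uint32_t packed_taps,
                               int32_t acc)
    {
        int16_t s0 = (int16_t)(packed_samples & 0xffff);
        int16_t s1 = (int16_t)(packed_samples >> 16);
        int16_t t0 = (int16_t)(packed_taps & 0xffff);
        int16_t t1 = (int16_t)(packed_taps >> 16);

        return acc + s0 * t0 + s1 * t1;
    }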
D | loopfilter_v6.asm
     67  stmdb sp!, {r4 - r11, lr}
     78  ldr r11, [src], pstep     ; p1
     91  uqsub8 r8, r10, r11       ; p2 - p1
     92  uqsub8 r10, r11, r10      ; p1 - p2
     98  uqsub8 r6, r11, r12       ; p1 - p0
    100  uqsub8 r7, r12, r11       ; p0 - p1
    108  uqsub8 r6, r11, r10       ; p1 - q1
    109  uqsub8 r7, r10, r11       ; q1 - p1
    110  uqsub8 r11, r12, r9       ; p0 - q0
    114  orr r12, r11, r12         ; abs (p0-q0)
         [all …]
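The uqsub8 pairs above are how the loop filter gets per-byte absolute differences without a SIMD abs instruction: unsigned saturating subtraction clamps at zero, so ORing the two directions yields |a - b| in each byte lane. A scalar sketch of one lane:

    /* |a - b| for one byte lane, built the same way the assembly does it:
     * uqsub8(a, b) | uqsub8(b, a). */
    static unsigned char abs_diff_u8(unsigned char a, unsigned char b)
    {
        unsigned char d0 = (a > b) ? (unsigned char)(a - b) : 0;  /* uqsub8 a, b */
        unsigned char d1 = (b > a) ? (unsigned char)(b - a) : 0;  /* uqsub8 b, a */
        return (unsigned char)(d0 | d1);                          /* orr         */
    }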
D | idct_v6.asm
     12  ; r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r14
     58  stmdb sp!, {r4-r11, lr}   ; backup registers  1 backup
     72  …smulwb r11, r4, r3       ; ([12] * cospi8sqrt2minus1) >> 16  1, r11 un 2, r3/r4 ^1 …
     75  add r11, r3, r11          ; temp2  1
     76  rsb r11, r11, r10         ; c1 = temp1 - temp2  1 c1
     79  add r8, r7, r11           ; b1 + c1  1 b+c
     81  sub r7, r7, r11           ; b1 - c1  1 b-c
     99  ldrsh r11, [r1, #2]       ; input[1]  1, r11 un 2 [1]
    103  …smulwb r9, r5, r11       ; ([1] * sinpi8sqrt2) >> 16  1, r9 un 2, r5/r11 ^1 …
    110  …smulwb r10, r4, r11      ; ([1] * cospi8sqrt2minus1) >> 16  1, r10 un 2, r4/r11 ^1 …
         [all …]
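The smulwb lines implement the fixed-point rotation of the VP8 inverse DCT. A scalar sketch in the style of libvpx's C reference, matching the temp1/temp2/c1 names in the comments above (the constant values are quoted from memory and should be treated as an assumption here):

    static const int cospi8sqrt2minus1 = 20091;  /* (cos(pi/8)*sqrt(2) - 1) << 16 */
    static const int sinpi8sqrt2       = 35468;  /* sin(pi/8)*sqrt(2) << 16       */

    /* One pair of rotated outputs from the inputs at positions 4 and 12 of the
     * 4x4 block. */
    static void idct_rotate(int in4, int in12, int *c1, int *d1)
    {
        int temp1, temp2;

        temp1 = in4 + ((in4 * cospi8sqrt2minus1) >> 16);
        temp2 = (in12 * sinpi8sqrt2) >> 16;
        *c1 = temp1 - temp2;

        temp1 = (in4 * sinpi8sqrt2) >> 16;
        temp2 = in12 + ((in12 * cospi8sqrt2minus1) >> 16);
        *d1 = temp1 + temp2;
    }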
D | bilinearfilter_v6.asm
     27  stmdb sp!, {r4 - r11, lr}
     29  ldr r11, [sp, #40]        ; vp8_filter address
     39  ldr r5, [r11]             ; load up filter coefficients
     44  mov r11, r1               ; save dst_ptr for each row
    103  add r11, r11, #2          ; move over to next column
    104  mov r1, r11
    108  ldmia sp!, {r4 - r11, pc}
    131  add r11, r11, #2          ; move over to next column
    132  mov r1, r11
    136  ldmia sp!, {r4 - r11, pc}
         [all …]
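The bilinear path is just a two-tap weighted average; the taps sum to 128, so the result is rounded and shifted back down by 7 bits (the +64/>>7 rounding is an assumption based on VP8's usual filter precision):

    /* One output pixel of a two-tap bilinear filter. */
    static unsigned char bilinear_tap(unsigned char a, unsigned char b,
                                      int tap0, int tap1)
    {
        int sum = a * tap0 + b * tap1 + 64;   /* taps sum to 128; +64 rounds */
        return (unsigned char)(sum >> 7);
    }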
D | sixtappredict8x4_v6.asm
     50  stmdb sp!, {r4 - r11, lr}
     88  smuad r11, r6, r3         ; vp8_filter[0], vp8_filter[1]
     93  smlad r11, r8, r4, r11    ; vp8_filter[2], vp8_filter[3]
     98  pkhbt r6, r6, r7, lsl #16 ; r11 | r10
     99  smlad r11, r10, r5, r11   ; vp8_filter[4], vp8_filter[5]
    104  add r11, r11, #0x40       ; round_shift_and_clamp
    106  usat r11, #8, r11, asr #7
    108  strh r11, [lr], #20       ; result is transposed and stored, which
    113  movne r11, r6
    119  movne r9, r11
         [all …]
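The add #0x40 / usat #8, asr #7 pair is the usual round-shift-and-clamp that follows the six multiply-accumulates: add half of the filter weight, shift the gain back out, and saturate to an 8-bit pixel, which usat does in one instruction. In scalar form:

    /* Equivalent of "add #0x40" followed by "usat #8, ..., asr #7". */
    static unsigned char round_shift_and_clamp(int sum)
    {
        sum = (sum + 64) >> 7;        /* round, remove the x128 filter gain */
        if (sum < 0)   sum = 0;       /* usat #8 saturates to [0, 255]      */
        if (sum > 255) sum = 255;
        return (unsigned char)sum;
    }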
/external/tremolo/Tremolo/
D | mdctARM.s
    186  STMFD r13!,{r4,r6-r11,r14}
    196  LDR r11,[r9],#4           @ r11= *wL++
    201  SMULL r14,r11,r12,r11     @ (r14,r11) = *l * *wL++
    203  ADD r6, r6, r11
    214  LDMFD r13!,{r4,r6-r11,PC}
    225  STMFD r13!,{r4,r6-r11,r14}
    235  LDR r11,[r9],#4           @ r11= *wL++
    240  SMULL r14,r11,r12,r11     @ (r14,r11) = *l * *wL++
    242  SUB r6, r6, r11
    253  LDMFD r13!,{r4,r6-r11,PC}
         [all …]
D | mdctLARM.s
    186  STMFD r13!,{r4,r6-r11,r14}
    198  LDRB r11,[r9],#1          @ r11= *wL++
    202  MUL r11,r12,r11           @ r11 = *l * *wL++
    204  MLA r6, r7, r6, r11       @ r6 = *--r * *--wR
    215  LDMFD r13!,{r4,r6-r11,PC}
    226  STMFD r13!,{r4,r6-r11,r14}
    237  LDRB r11,[r9],#1          @ r11= *wL++
    242  MUL r11,r12,r11           @ (r14,r11) = *l * *wL++
    245  SUB r6, r6, r11
    256  LDMFD r13!,{r4,r6-r11,PC}
         [all …]
D | bitwiseARM.s
     45  STMFD r13!,{r10,r11,r14}
     56  LDRLT r11,[r3,#4]!        @ r11= ptr[1]
     60  ORRLT r10,r10,r11,LSL r14 @ r10= Next 32 bits.
     64  LDMFD r13!,{r10,r11,PC}
     80  MOV r11,#1
     83  RSB r11,r11,r11,LSL r5    @ r11= mask
     84  AND r10,r10,r11           @ r10= first r5 bits
     88  LDR r11,[r0,#12]          @ r11= head = b->head
     92  LDR r11,[r11,#12]         @ r11= head = head->next
     95  CMP r11,#0
         [all …]
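The hits show the two halves of Tremolo's bit reader: pulling in the next 32-bit word when a read straddles a word boundary (LDRLT/ORRLT), and building the mask (1 << n) - 1 with the RSB trick. A rough C sketch, assuming LSB-first packing and 0 < bits < 32 (illustrative only, not Tremolo's actual API):

    #include <stdint.h>

    static uint32_t read_bits(const uint32_t *ptr, int bitpos, int bits)
    {
        uint32_t word = ptr[0] >> bitpos;        /* bits left in the current word */

        if (bitpos + bits > 32)
            word |= ptr[1] << (32 - bitpos);     /* next 32 bits                  */

        return word & ((1u << bits) - 1);        /* mask = (1 << bits) - 1        */
    }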
D | dpen.s
    234  STMFD r13!,{r4-r11,r14}
    258  LDR r11,[r4,#28]          @ r11= s->q_minp
    261  SUBS r11,r7, r11          @ r11= add = point - s->q_minp
    263  MOVGT r14,r14,ASR r11     @ r14= add = s->q_min >> add (if add >0)
    264  RSBLT r11,r11,#0
    265  MOVLT r14,r14,LSL r11    @ r14= add = s->q_min << -add (if add < 0)
    294  MOV r11,#0                @ r11= prev = 0
    300  ADD r1, r11,r1, ASR r5    @ r1 = v = prev+((add+mul*v)>>shiftM)
    301  AND r11,r1, r7            @ r11= prev = seqMask & v
    306  LDMFD r13!,{r4-r11,PC}
         [all …]
/external/llvm/test/MC/X86/
D | x86_64-bmi-encoding.s
      9  blsmskq %r11, %r10
     25  blsiq %r11, %r10
     41  blsrq %r11, %r10
     57  andnq (%rax), %r11, %r10
     73  bextrq %r12, %r11, %r10
     89  bzhiq %r12, %r11, %r10
    101  pextq %r12, %r11, %r10
    105  pextq (%rax), %r11, %r10
    117  pdepq %r12, %r11, %r10
    121  pdepq (%rax), %r11, %r10
         [all …]
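This is an encoding test, but the semantics of the BMI instructions it exercises are compact enough to state in C (documented Intel behaviour; bextr/bzhi/pext/pdep are omitted for brevity):

    #include <stdint.h>

    static uint64_t blsi(uint64_t x)   { return x & (0 - x); }  /* isolate lowest set bit    */
    static uint64_t blsr(uint64_t x)   { return x & (x - 1); }  /* clear lowest set bit      */
    static uint64_t blsmsk(uint64_t x) { return x ^ (x - 1); }  /* mask up to lowest set bit */
    static uint64_t andn(uint64_t a, uint64_t b) { return ~a & b; }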
/external/openssl/crypto/sha/asm/
D | sha1-armv4-large.s
     24  ldrb r11,[r1,#1]
     29  orr r9,r9,r11,lsl#16
     49  ldrb r11,[r1,#1]
     54  orr r9,r9,r11,lsl#16
     74  ldrb r11,[r1,#1]
     79  orr r9,r9,r11,lsl#16
     99  ldrb r11,[r1,#1]
    104  orr r9,r9,r11,lsl#16
    124  ldrb r11,[r1,#1]
    129  orr r9,r9,r11,lsl#16
         [all …]
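The repeated ldrb/orr pattern assembles each big-endian SHA-1 message word one byte at a time, since the input block carries no alignment guarantee. The equivalent C:

    #include <stdint.h>

    static uint32_t load_be32(const unsigned char *p)
    {
        return ((uint32_t)p[0] << 24) |
               ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] <<  8) |
                (uint32_t)p[3];
    }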
/external/libvpx/vp8/encoder/arm/armv6/
D | walsh_v6.asm
     22  stmdb sp!, {r4 - r11, lr}
     35  qaddsubx r11, r2, r3      ; [b1|d1] [1+2 | 0-3]
     39  qaddsubx r2, r10, r11     ; [1 | 2] [c1+d1 | a1-b1]
     40  qaddsubx r3, r11, r10     ; [0 | 3] [b1+a1 | d1-c1]
     45  qaddsubx r11, r6, r7      ; [b1|d1] [9+10 | 8-11]
     49  qaddsubx r6, r10, r11     ; [9 |10] [c1+d1 | a1-b1]
     50  qaddsubx r7, r11, r10     ; [8 |11] [b1+a1 | d1-c1]
     57  qadd16 r11, r5, r7        ; b1 [4+8 | 7+11]
     61  qadd16 r3, r10, r11       ; a2 [a1+b1] [0 | 3]
     63  qsub16 r7, r10, r11       ; c2 [a1-b1] [8 |11]
         [all …]
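The qaddsubx/qadd16/qsub16 operations pair up the butterflies of the 4x4 Walsh-Hadamard transform two values at a time. One horizontal butterfly in scalar form, using the a1/b1/c1/d1 names from the comments (a sketch; the real routine also does the vertical pass and final scaling):

    static void wht_butterfly(const short in[4], short out[4])
    {
        int a1 = in[0] + in[3];
        int b1 = in[1] + in[2];
        int c1 = in[1] - in[2];
        int d1 = in[0] - in[3];

        out[0] = (short)(b1 + a1);
        out[1] = (short)(c1 + d1);
        out[2] = (short)(a1 - b1);
        out[3] = (short)(d1 - c1);
    }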
D | vp8_subtract_armv6.asm
     95  uxtb16 r11, r7, ror #8    ; [p3 | p1] (A)
     98  usub16 r7, r10, r11       ; [d3 | d1] (A)
    101  ldr r11, [r3], #4         ; pred (B)
    110  uxtb16 r9, r11            ; [p2 | p0] (B)
    112  uxtb16 r11, r11, ror #8   ; [p3 | p1] (B)
    115  usub16 r7, r10, r11       ; [d3 | d1] (B)
    138  uxtb16 r11, r7, ror #8    ; [p3 | p1] (A)
    141  usub16 r7, r10, r11       ; [d3 | d1] (A)
    144  ldr r11, [r3], #4         ; pred (B)
    153  uxtb16 r9, r11            ; [p2 | p0] (B)
         [all …]
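uxtb16 unpacks alternating bytes of a word into two 16-bit lanes and usub16 subtracts the lanes, so each instruction pair produces four source-minus-prediction differences. The scalar operation being vectorised is simply:

    static void subtract_row(const unsigned char *src, const unsigned char *pred,
                             short *diff, int width)
    {
        int i;

        for (i = 0; i < width; i++)
            diff[i] = (short)(src[i] - pred[i]);
    }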
D | vp8_variance16x16_armv6.asm
     29  mov r11, #0               ; initialize sse = 0
     55  smlad r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     60  smlad r11, r10, r10, r11  ; dual signed multiply, add and accumulate (2)
     79  smlad r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     84  smlad r11, r10, r10, r11  ; dual signed multiply, add and accumulate (2)
    103  smlad r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
    108  smlad r11, r10, r10, r11  ; dual signed multiply, add and accumulate (2)
    129  smlad r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
    130  smlad r11, r10, r10, r11  ; dual signed multiply, add and accumulate (2)
    140  str r11, [r6]             ; store sse
         [all …]
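Here r11 carries the sum of squared differences (each smlad squares two differences at once) while another register keeps the plain sum; the 16x16 variance then falls out as sse - sum*sum/256. The scalar reference looks like:

    #include <stdint.h>

    static unsigned int variance16x16_ref(const unsigned char *src, int src_stride,
                                          const unsigned char *ref, int ref_stride,
                                          unsigned int *sse_out)
    {
        int i, j, sum = 0;
        unsigned int sse = 0;

        for (i = 0; i < 16; i++, src += src_stride, ref += ref_stride) {
            for (j = 0; j < 16; j++) {
                int d = src[j] - ref[j];
                sum += d;
                sse += (unsigned int)(d * d);
            }
        }

        *sse_out = sse;
        return sse - (unsigned int)(((int64_t)sum * sum) >> 8);   /* >> 8 == / 256 */
    }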
/external/openssl/crypto/modes/asm/
D | ghash-armv4.s
     28  stmdb sp!,{r3-r11,lr}     @ save r3/end too
     31  ldmia r12,{r4-r11}        @ copy rem_4bit ...
     32  stmdb sp!,{r4-r11}        @ ... to stack
     44  add r11,r1,r14
     48  ldmia r11,{r8-r11}        @ load Htbl[nhi]
     58  eor r7,r11,r7,lsr#4
     65  add r11,r1,r12,lsl#4
     69  ldmia r11,{r8-r11}        @ load Htbl[nlo]
     78  eor r7,r11,r7,lsr#4
     80  add r11,r1,r14
         [all …]
/external/valgrind/main/coregrind/m_syswrap/
D | syscall-amd64-darwin.S
    117  movq -16(%rbp), %r11              /* r11 = VexGuestAMD64State * */
    118  movq OFFSET_amd64_RDI(%r11), %rdi
    119  movq OFFSET_amd64_RSI(%r11), %rsi
    120  movq OFFSET_amd64_RDX(%r11), %rdx
    121  movq OFFSET_amd64_RCX(%r11), %r10 /* rcx is passed in r10 instead */
    122  movq OFFSET_amd64_R8(%r11), %r8
    123  movq OFFSET_amd64_R9(%r11), %r9
    125  movq OFFSET_amd64_RSP(%r11), %r11 /* r11 = simulated RSP */
    126  movq 16(%r11), %rax
    128  movq 8(%r11), %rax
         [all …]
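The movq sequence copies the simulated guest registers into the real argument registers in AMD64 syscall order, with the fourth argument moved from RCX to R10 because the syscall instruction clobbers RCX. In C terms (the structs below are illustrative stand-ins for Valgrind's guest state, not its real types):

    struct guest_regs  { unsigned long rdi, rsi, rdx, rcx, r8, r9; };
    struct kernel_args { unsigned long rdi, rsi, rdx, r10, r8, r9; };

    static void marshal_syscall_args(const struct guest_regs *g, struct kernel_args *k)
    {
        k->rdi = g->rdi;   /* arg1 */
        k->rsi = g->rsi;   /* arg2 */
        k->rdx = g->rdx;   /* arg3 */
        k->r10 = g->rcx;   /* arg4: rcx is passed in r10 instead */
        k->r8  = g->r8;    /* arg5 */
        k->r9  = g->r9;    /* arg6 */
    }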
/external/libvpx/vpx_scale/arm/neon/
D | vp8_vpxyv12_copyframe_func_neon.asm
     24  push {r4 - r11, lr}
     33  ldr r11, [r1, #yv12_buffer_config_v_buffer] ;srcptr1
     45  str r11, [sp, #12]
     54  add r11, r3, r7
     70  vst1.8 {q8, q9}, [r11]!
     72  vst1.8 {q10, q11}, [r11]!
     74  vst1.8 {q12, q13}, [r11]!
     76  vst1.8 {q14, q15}, [r11]!
     87  sub r11, r5, r10
    112  add r11, r3, r7
         [all …]
D | vp8_vpxyv12_copysrcframe_func_neon.asm
     27  push {r4 - r11, lr}
     39  add r11, r3, r7           ;second row dst
     63  vst1.8 {q4, q5}, [r11]!
     65  vst1.8 {q6, q7}, [r11]!
     67  vst1.8 {q12, q13}, [r11]!
     69  vst1.8 {q14, q15}, [r11]!
     81  vst1.8 {d1}, [r11]!
     92  strb r8, [r11], #1
    100  add r11, r11, r7
    150  add r11, r3, r7           ;second row dst
         [all …]
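Both NEON copy routines stream two rows at a time through the q registers; stripped of the vectorisation, each plane copy is a strided row-by-row memcpy:

    #include <string.h>

    static void copy_plane(unsigned char *dst, int dst_stride,
                           const unsigned char *src, int src_stride,
                           int width, int height)
    {
        int row;

        for (row = 0; row < height; row++) {
            memcpy(dst, src, (size_t)width);
            dst += dst_stride;
            src += src_stride;
        }
    }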
/external/libffi/src/s390/
D | sysv.S
     52  lr %r11,%r15              # Set up frame pointer
     59  l %r7,96(%r11)            # Load function address
     60  st %r11,0(%r15)           # Set up back chain
     61  ahi %r11,-48              # Register save area
     68  lm %r2,%r6,0(%r11)        # Load arguments
     69  ld %f0,32(%r11)
     70  ld %f2,40(%r11)
     75  l %r4,48+56(%r11)
     76  lm %r6,%r15,48+24(%r11)
     80  l %r4,48+56(%r11)
         [all …]
/external/openssl/crypto/aes/asm/
D | vpaes-x86_64.pl
     77  ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
     85  mov \$16, %r11
    113  movdqa -0x40(%r11,%r10), %xmm1    # .Lk_mc_forward[]
    117  movdqa (%r11,%r10), %xmm4         # .Lk_mc_backward[]
    123  add \$16, %r11                    # next mc
    126  and \$0x30, %r11                  # ... mod 4
    160  movdqa 0x40(%r11,%r10), %xmm1     # .Lk_sr[]
    179  mov %rax, %r11
    182  shl \$4, %r11
    186  xor \$0x30, %r11
         [all …]
D | aes-armv4.s
    120  mov r11,r2
    221  ldmia r11!,{r4-r7}
    223  ldr r12,[r11,#240-16]
    274  ldr r7,[r11],#16
    278  ldr r4,[r11,#-12]
    281  ldr r5,[r11,#-8]
    283  ldr r6,[r11,#-4]
    336  ldr r7,[r11,#0]
    339  ldr r4,[r11,#4]
    341  ldr r5,[r11,#8]
         [all …]
/external/libvpx/vp8/decoder/arm/armv6/
D | dequant_idct_v6.asm
     24  stmdb sp!, {r4-r11, lr}
     73  smulwt r11, r3, r12
     80  pkhbt r9, r9, r11, lsl #16
     81  ldr r11, [r0], #4
     86  uadd16 r10, r11, r14
     87  usub16 r8, r11, r14
    110  pkhbt r11, r8, r6, lsl #16
    116  uadd16 r10, r11, lr
    117  usub16 lr, r11, lr
    122  smulwt r11, r4, r8
         [all …]
D | dequant_dc_idct_v6.asm
     27  stmdb sp!, {r4-r11, lr}
     95  smulwt r11, r3, r12
    102  pkhbt r9, r9, r11, lsl #16
    103  ldr r11, [r0], #4
    108  uadd16 r10, r11, r14
    109  usub16 r8, r11, r14
    132  pkhbt r11, r8, r6, lsl #16
    138  uadd16 r10, r11, lr
    139  usub16 lr, r11, lr
    144  smulwt r11, r4, r8
         [all …]
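Both dequantising IDCT variants start by scaling the quantised coefficients back up before running the same inverse transform shown for idct_v6.asm above. A scalar sketch of that first step (the _dc variant takes an already-dequantised DC term as a separate argument; treat that detail as an assumption here):

    static void dequantize_block(const short *q, const short *dq, short *coeffs)
    {
        int i;

        for (i = 0; i < 16; i++)
            coeffs[i] = (short)(q[i] * dq[i]);
    }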
/external/flac/libFLAC/ppc/as/
D | lpc_asm.s
     81  li r11,16
     82  subf r31,r31,r11          ; r31: 4-(data%4)
    104  mr r11,r8
    105  lvsl v16,0,r11            ; v16: history alignment permutation vector
    111  lvx v8,0,r11
    112  addi r11,r11,-16
    113  lvx v9,0,r11
    126  addi r11,r11,-16
    127  lvx v10,0,r11
    140  addi r11,r11,-16
         [all …]
/external/flac/libFLAC/ppc/gas/
D | lpc_asm.s
     83  li r11,16
     84  subf r31,r31,r11          # r31: 4-(data%4)
    106  mr r11,r8
    107  lvsl v16,0,r11            # v16: history alignment permutation vector
    113  lvx v8,0,r11
    114  addi r11,r11,-16
    115  lvx v9,0,r11
    128  addi r11,r11,-16
    129  lvx v10,0,r11
    142  addi r11,r11,-16
         [all …]