/external/boringssl/src/ssl/test/runner/poly1305/ |
D | sum_arm.s |
    35   MOVW R5>>8, R12
    44   AND R12, R6, R6
    75   ADD $24, R13, R12
    76   MOVM.IB [R4-R8, R14], (R12)
    81   MOVW R2, R12
    90   CMP $16, R12
    111  MOVW R2>>14, R12
    116  ORR R3<<18, R12, R12
    121  BIC $0xfc000000, R12, R12
    127  ADD R12, R8, R8
    [all …]
|
D | sum_amd64.s |
    69   MOVQ 8(AX), R12
    71   ANDQ ·poly1305Mask<>+8(SB), R12 // r1
    83   POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
|
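Note on the sum_amd64.s hits above: lines 69–71 load the second quadword of the one-time key into R12 and AND it with poly1305Mask<>+8(SB), which is the standard Poly1305 "clamping" of the r half of the key. A minimal C sketch of that clamp follows, using the RFC 8439 clamp constants (I'm assuming those are the values poly1305Mask<> holds); the helper names are illustrative, not the library's API.

    #include <stdint.h>

    /* Little-endian 64-bit load, portable across host byte orders. */
    static uint64_t load_le64(const uint8_t *p) {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
            v |= (uint64_t)p[i] << (8 * i);
        return v;
    }

    /* Sketch of Poly1305 r-clamping (RFC 8439, section 2.5.1): the first
     * 16 key bytes become r, with certain bits forced to zero. */
    static void poly1305_clamp_r(const uint8_t key[32], uint64_t r[2]) {
        r[0] = load_le64(key)     & 0x0FFFFFFC0FFFFFFFULL;
        r[1] = load_le64(key + 8) & 0x0FFFFFFC0FFFFFFCULL; /* the ANDQ into R12 above */
    }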
/external/boringssl/src/ssl/test/runner/curve25519/ |
D | freeze_amd64.s |
    26   MOVQ SI,R12
    27   SHRQ $51,R12
    29   ADDQ R12,DX
    30   MOVQ DX,R12
    31   SHRQ $51,R12
    33   ADDQ R12,CX
    34   MOVQ CX,R12
    35   SHRQ $51,R12
    37   ADDQ R12,R8
    38   MOVQ R8,R12
    [all …]
|
D | ladderstep_amd64.s |
    24   MOVQ R8,R12
    29   ADDQ ·_2P1234(SB),R12
    39   SUBQ 104(DI),R12
    49   MOVQ R12,64(SP)
    68   MOVQ AX,R12
    82   ADDQ AX,R12
    121  ADDQ AX,R12
    132  SHLQ $13,R13:R12
    133  ANDQ DX,R12
    134  ADDQ R11,R12
    [all …]
|
D | mul_amd64.s |
    41   MOVQ AX,R12
    57   ADDQ AX,R12
    74   ADDQ AX,R12
    108  ADDQ AX,R12
    120  ADDQ AX,R12
    132  SHLQ $13,R13:R12
    133  ANDQ SI,R12
    134  ADDQ R11,R12
    149  ADDQ R12,DX
|
D | square_amd64.s |
    30   MOVQ DX,R12
    44   ADCQ DX,R12
    83   ADCQ DX,R12
    95   SHLQ $13,R12:R11
    100  ADDQ R12,R13
|
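Note on the curve25519 hits above: the repeated SHRQ $51 / ADDQ pairs in freeze_amd64.s (and the shift-and-mask sequences in mul/square_amd64.s) are the carry chain of a radix-2^51 field element: five 64-bit limbs of 51 bits each, modulo 2^255 - 19, with the carry out of the top limb wrapping around multiplied by 19. A hedged C sketch of one carry pass, assuming that representation (limb layout and function name are illustrative, not the library's API):

    #include <stdint.h>

    /* One carry pass over a five-limb radix-2^51 field element mod 2^255 - 19,
     * mirroring the SHRQ $51 / ADDQ sequences seen in freeze_amd64.s. */
    static void fe51_carry(uint64_t h[5]) {
        const uint64_t mask = (1ULL << 51) - 1;
        uint64_t c;
        for (int i = 0; i < 4; i++) {
            c = h[i] >> 51;   /* SHRQ $51, limb  */
            h[i] &= mask;
            h[i + 1] += c;    /* ADDQ carry into the next limb */
        }
        c = h[4] >> 51;       /* carry out of the top limb...            */
        h[4] &= mask;
        h[0] += 19 * c;       /* ...wraps times 19, since 2^255 = 19 (mod p) */
    }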
/external/libxaac/decoder/armv7/ |
D | ixheaacd_cos_sin_mod.s |
    40   STMFD SP!, {R4-R12, R14}
    54   AND R12, R10, #7
    55   CMP R12, #0
    93   SMULWT R12, R1, R2
    100  QSUB R12, R12, R6
    106  STR R12, [R10, #4]
    110  SMULWB R12, R1, R2
    115  QSUB R12, R12, R6
    122  STR R12, [R10, #0xF8]
    127  SMULWT R12, R0, R2
    [all …]
|
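Note on the SMULWT/SMULWB hits above: these are ARM's 32x16 fractional multiplies. The 32-bit operand is multiplied by the top (T) or bottom (B) signed halfword of the second register and the upper 32 bits of the 48-bit product are kept, i.e. an arithmetic shift right by 16. A C equivalent sketch (helper names are mine, not from libxaac):

    #include <stdint.h>

    /* C equivalents of ARM SMULWB/SMULWT as used in ixheaacd_cos_sin_mod.s. */
    static int32_t smulwb(int32_t a, int32_t b) {
        int16_t lo = (int16_t)(uint16_t)b;              /* bottom halfword of b */
        return (int32_t)(((int64_t)a * lo) >> 16);
    }

    static int32_t smulwt(int32_t a, int32_t b) {
        int16_t hi = (int16_t)(uint16_t)((uint32_t)b >> 16); /* top halfword of b */
        return (int32_t)(((int64_t)a * hi) >> 16);
    }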
D | ixheaacd_sbr_qmfsyn64_winadd.s |
    28   STMFD sp!, {R4-R12, R14}
    35   MOV R12, R2
    50   ADD R12, R12, R6
    90   VLD1.16 D11, [R12]!
    95   MOV R11, R12
    97   ADD R12, R12, #248
    100  VLD1.16 D13, [R12], R9
    105  VLD1.16 D15, [R12], R9
    110  VLD1.16 D17, [R12], R9
    115  VLD1.16 D19, [R12], R9
    [all …]
|
D | ixheaacd_enery_calc_per_subband.s |
    34   SUB R12, R3, R2
    83   LDR R12, [R8], #0x100
    86   EOR R12, R12, R12, ASR #31
    88   ORRGE R6, R6, R12
    102  LDR R12, [R8], #0x100
    106  MOV R12, R12, ASR R14
    107  SMLABB R6, R12, R12, R6
    113  RSB R12, R14, #0
    119  MOV R4, R4, LSL R12
    121  MOV R3, R3, LSL R12
    [all …]
|
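Note on the energy-calculation hits above: the load / arithmetic-shift / SMLABB sequence around lines 102-107 is a classic energy accumulation, where each sample is scaled down and its square added to a running sum. A hedged C sketch of that inner loop (names, the stride, and the shift parameter are illustrative, not taken from the libxaac sources):

    #include <stdint.h>
    #include <stddef.h>

    /* Accumulate subband energy: scale each sample, then add its square,
     * as in "MOV R12, R12, ASR R14" followed by "SMLABB R6, R12, R12, R6". */
    static int32_t subband_energy(const int32_t *samples, size_t n, int shift) {
        int32_t energy = 0;
        for (size_t i = 0; i < n; i++) {
            int16_t x = (int16_t)(samples[i] >> shift); /* assumed to fit 16 bits */
            energy += (int32_t)x * x;
        }
        return energy;
    }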
D | ixheaacd_esbr_qmfsyn64_winadd.s |
    19   STMFD sp!, {R4-R12, R14}
    25   MOV R12, R2
    37   ADD R12, R12, R6
    84   VLD1.32 {D2, D3}, [R12]!
    86   MOV R11, R12
    91   ADD R12, R12, #496
    93   VLD1.32 {D6, D7}, [R12], R9
    99   VLD1.32 {D10, D11}, [R12], R9
    105  VLD1.32 {D14, D15}, [R12], R9
    111  VLD1.32 {D18, D19}, [R12], R9
    [all …]
|
D | ixheaacd_apply_rot.s |
    27   STMFD SP!, {R4-R12, R14}
    74   LDR R12, [R0, #44]
    80   LDR R5, [R12]
    82   LDR R6, [R12, #0x80]
    94   STR R5, [R12], #4
    96   STR R14, [R12, #0x7c]
    98   LDR R5, [R12, #0x3c]
    99   LDR R6, [R12, #0xbc]
    111  STR R5, [R12, #0x3c]
    113  STR R14, [R12, #0xbc]
    [all …]
|
D | ixheaacd_conv_ergtoamplitudelp.s |
    37   MOV R12, #0
    52   LDRH R12, [R6, R5]
    55   SMULWBNE R12, R12, R11
    128  MOV R12, R12, LSL R6
    129  CMP R12, #0x8000
    130  MVNGE R12, #0x8000
    131  CMNLT R12, #0x00008000
    132  MOVLT R12, #0x00008000
    133  STRH R12, [R2], #4
    139  MOV R12, R12, ASR R6
    [all …]
|
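Note on the conv_ergtoamplitudelp hits above: the CMP/MVNGE/CMNLT/MOVLT sequence at lines 129-132, followed by the halfword store at line 133, appears to saturate a scaled 32-bit value to the signed 16-bit range before storing it (the MVN result 0xFFFF7FFF truncates to 0x7FFF in the STRH). A plain-C equivalent of that clamp, under that reading:

    #include <stdint.h>

    /* Saturate a 32-bit intermediate to int16_t range before a halfword store. */
    static int16_t sat16(int32_t x) {
        if (x > 32767)  return 32767;   /* CMP / MVNGE path  */
        if (x < -32768) return -32768;  /* CMNLT / MOVLT path */
        return (int16_t)x;
    }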
D | ixheaacd_overlap_add2.s |
    28   STMFD sp!, {R4-R12, R14}
    44   SUB R12, R5, #1
    46   MOV R12, R12, LSL #2
    48   ADD R7, R1, R12
    53   MOV R12, #-16
    54   VLD2.16 {D6, D7}, [R7], R12
    70   VLD2.16 {D14, D15}, [R7], R12
    83   VLD2.16 {D6, D7}, [R7], R12
    104  VLD2.16 {D14, D15}, [R7], R12
    132  MOV R12, #12
    [all …]
|
D | ixheaacd_tns_parcor2lpc_32x16.s |
    26   STMFD SP!, {R2, R4-R12, R14}
    51   MOV R12, R3
    67   SUBS R12, R12, #1
    75   MOV R12, R3
    95   SUBS R12, R12, #1
|
D | ixheaacd_overlap_add1.s |
    29   STMFD sp!, {R4-R12, R14}
    43   MOV R12, #0
    44   VDUP.S16 D12, R12
    45   MOV R12, #-16
    47   VLD1.32 {D6, D7}, [R10], R12
    58   VLD2.16 {D2, D3}, [R8], R12
    96   VLD1.32 {D6, D7}, [R10], R12
    113  VLD2.16 {D2, D3}, [R8], R12
    140  VLD1.32 {D6, D7}, [R10], R12
    160  VLD2.16 {D2, D3}, [R8], R12
    [all …]
|
D | ixheaacd_conv_ergtoamplitude.s |
    38   MOV R12, #0
    51   LDRH R12, [R11, R5]
    55   SMULWBNE R12, R12, R10
    60   STRH R12, [R2, #-4]
|
D | ixheaacd_post_twiddle_overlap.s |
    28   STMFD sp!, {R4-R12}
    65   SMULWB R12, R9, R10
    68   SUB R8, R12, R11
    73   MOV R12, #-50
    75   SMULWB R11, R8, R12
    87   SMULWT R12, R8, R10
    109  MOVS R8, R12, ASR R9
    111  MOVLT R12, #0x80000000
    112  MVNGT R12, #0x80000000
    113  MOVEQ R12, R12, LSL R11
    [all …]
|
D | ia_xheaacd_mps_mulshift.s |
    28   STMFD sp!, {R4-R12}
    44   LDMFD sp!, {R4-R12}
|
D | ixheaacd_dec_DCT2_64_asm.s |
    111  SUB R12, R5, #32
    150  VST2.32 {Q6, Q7}, [R12]
    154  SUB R12, R5, #32
    205  VST2.32 {Q6, Q7}, [R12]
    209  SUB R12, R5, #32
    250  VST2.32 {Q6, Q7}, [R12]
    257  SUB R12, R5, #32
    289  VST2.32 {Q6, Q7}, [R12]
|
D | ixheaacd_dct3_32.s |
    33   STMFD sp!, {R4-R12, R14}
    49   MOV R12, #-16
    54   VLD1.32 {Q1}, [R7], R12
    71   VLD1.32 {Q4}, [R5], R12
    86   VLD1.32 {Q1}, [R7], R12
    116  VLD1.32 {Q4}, [R5], R12
    262  MOV R12, #4
    429  VLD1.16 D10, [R4], R12
|
D | ixheaacd_calcmaxspectralline.s |
    28   STMFD sp!, {R4-R12, R14}
    77   LDMFD sp!, {R4-R12, R15}
|
/external/llvm/test/CodeGen/Mips/ |
D | atomic.ll |
    149  ; ALL: ll $[[R12:[0-9]+]], 0($[[R2]])
    150  ; ALL: addu $[[R13:[0-9]+]], $[[R12]], $[[R9]]
    152  ; ALL: and $[[R15:[0-9]+]], $[[R12]], $[[R8]]
    159  ; ALL: and $[[R17:[0-9]+]], $[[R12]], $[[R7]]
    194  ; ALL: ll $[[R12:[0-9]+]], 0($[[R2]])
    195  ; ALL: subu $[[R13:[0-9]+]], $[[R12]], $[[R9]]
    197  ; ALL: and $[[R15:[0-9]+]], $[[R12]], $[[R8]]
    204  ; ALL: and $[[R17:[0-9]+]], $[[R12]], $[[R7]]
    239  ; ALL: ll $[[R12:[0-9]+]], 0($[[R2]])
    240  ; ALL: and $[[R13:[0-9]+]], $[[R12]], $[[R9]]
    [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/Mips/ |
D | atomic.ll |
    91   ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
    93   ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
    122  ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
    124  ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
    154  ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
    156  ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
    215  ; CHECK: ll $[[R12:[0-9]+]], 0($[[R2]])
    216  ; CHECK: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
    219  ; CHECK: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
|
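Note on both atomic.ll entries above: the FileCheck lines verify the ll/sc loop MIPS uses for sub-word atomic read-modify-write: the containing aligned 32-bit word is loaded with ll, the updated byte or halfword is masked in (the and/or pairs on $[[R12]]), and sc retries until the store succeeds. At the source level that lowering corresponds to a C11 atomic operation on a small integer; a hedged example of the kind of operation such a test exercises (names are mine, not from the test):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Atomic add on a uint8_t: on MIPS32 this becomes an ll/sc loop on
     * the aligned word, with the byte combined in under a mask. */
    static _Atomic uint8_t counter = 0;

    int main(void) {
        atomic_fetch_add_explicit(&counter, 5, memory_order_seq_cst);
        printf("%u\n", (unsigned)atomic_load(&counter));
        return 0;
    }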
/external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/ |
D | 2010-03-09-indirect-call.ll | 5 ; Indirect calls must use R12 on Darwin (i.e., R12 must contain the address of
|
/external/llvm/test/CodeGen/PowerPC/ |
D | 2010-03-09-indirect-call.ll | 5 ; Indirect calls must use R12 on Darwin (i.e., R12 must contain the address of
|
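Note on the two identical PowerPC entries above: the test comment states that on Darwin an indirect call must leave the target address in R12. At the source level that is simply a call through a function pointer; a minimal C illustration of the kind of code the test compiles (function names are hypothetical, not from the test):

    #include <stdio.h>

    static void greet(void) { puts("hello"); }

    int main(void) {
        void (*fp)(void) = greet;
        fp();  /* indirect call; on Darwin/PowerPC the callee address must be in R12 */
        return 0;
    }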