Searched refs:TMP2 (Results 1 – 6 of 6) sorted by relevance
/arch/x86/crypto/
D | aesni-intel_asm.S
    164  .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
    166      pshufd    $78, \GH, \TMP2
    168      pxor      \GH, \TMP2            # TMP2 = a1+a0
    172      PCLMULQDQ 0x00, \TMP3, \TMP2    # TMP2 = (a0+a1)*(b1+b0)
    173      pxor      \GH, \TMP2
    174      pxor      \TMP1, \TMP2          # TMP2 = (a0*b0)+(a1*b0)
    175      movdqa    \TMP2, \TMP3
    177      psrldq    $8, \TMP2             # right shift TMP2 2 DWs
    179      pxor      \TMP2, \TMP1          # TMP2:GH holds the result of GH*HK
    183      movdqa    \GH, \TMP2
    [all …]
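In these hits TMP2 carries the Karatsuba middle term of a carry-less (GF(2) polynomial) multiply: pshufd $78 swaps the 64-bit halves so the pxor forms a1+a0, one PCLMULQDQ multiplies the half-sums, the later pxor pair folds out a0*b0 and a1*b1 to leave the cross terms, and pslldq/psrldq split that middle term across the 256-bit product. A minimal C sketch of that structure, with a portable bit-loop standing in for PCLMULQDQ (all names here are illustrative, not from the source):

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;   /* 128-bit value as two halves */

    /* Stand-in for one PCLMULQDQ: carry-less 64x64 -> 128 multiply. */
    static u128 clmul64(uint64_t a, uint64_t b)
    {
        u128 r = { 0, 0 };
        for (int i = 0; i < 64; i++)
            if ((b >> i) & 1) {
                r.lo ^= a << i;
                r.hi ^= i ? a >> (64 - i) : 0;
            }
        return r;
    }

    /* Karatsuba carry-less 128x128 -> 256 multiply, mirroring GHASH_MUL:
     * three clmul64 calls instead of four. */
    static void clmul128(u128 a, u128 b, u128 *lo, u128 *hi)
    {
        u128 p0  = clmul64(a.lo, b.lo);                    /* a0*b0           */
        u128 p1  = clmul64(a.hi, b.hi);                    /* a1*b1           */
        u128 mid = clmul64(a.lo ^ a.hi, b.lo ^ b.hi);      /* (a0+a1)*(b0+b1) */

        /* TMP2 in the asm: fold out p0 and p1, leaving a0*b1 + a1*b0 */
        mid.lo ^= p0.lo ^ p1.lo;
        mid.hi ^= p0.hi ^ p1.hi;

        /* pslldq/psrldq: distribute the middle term across the result */
        lo->lo = p0.lo;
        lo->hi = p0.hi ^ mid.lo;
        hi->lo = p1.lo ^ mid.hi;
        hi->hi = p1.hi;
    }

(GHASH proper also works in a bit-reflected representation; the sketch only shows the Karatsuba shape visible in these hit lines.)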
D | aesni-intel_avx-x86_64.S
    216  TMP2 = 16*1    # Temporary storage for AES State 2 (State 1 is stored in an XMM register)   (define)
    636      vmovdqa   \XMM2, TMP2(%rsp)
    737      vmovdqa   TMP2(%rsp), \T1
   1900      vmovdqa   \XMM2, TMP2(%rsp)
   1998      vmovdqa   TMP2(%rsp), \T1
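In this file TMP2 is a stack offset, not a register: state 2 of a multi-block AES pipeline is parked 16*1 bytes above %rsp because the XMM registers are already occupied. A trivial C analogy of the spill/reload pattern (names hypothetical):

    #include <stdint.h>
    #include <string.h>

    typedef struct { uint8_t b[16]; } state128;   /* stands in for one XMM register */

    #define TMP2 (16 * 1)   /* byte offset of the spill slot, as in the source */

    /* vmovdqa \XMM2, TMP2(%rsp): park state 2 in its stack slot */
    static void spill(uint8_t *stack, const state128 *xmm2)
    {
        memcpy(stack + TMP2, xmm2, sizeof(*xmm2));
    }

    /* vmovdqa TMP2(%rsp), \T1: reload it when needed */
    static void reload(state128 *t1, const uint8_t *stack)
    {
        memcpy(t1, stack + TMP2, sizeof(*t1));
    }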
/arch/arm/crypto/
D | speck-neon-core.S
     66  TMP2 .req q14
    122      vshl.u\n  TMP2, Y2, #3
    126      vsri.u\n  TMP2, Y2, #(\n - 3)
    132      veor      Y2, TMP2, X2
    146      veor      TMP2, Y2, X2
    152      vshr.u\n  Y2, TMP2, #3
    156      vsli.u\n  Y2, TMP2, #(\n - 3)
    384      vld1.8    {TMP2, TMP3}, [r12:128]!
    387      veor      X1, TMP2
    390      vld1.8    {TMP2, TMP3}, [r12:128]!
    [all …]
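NEON has no vector rotate, so these hits build rol(y, 3) from a shift plus a shift-right-and-insert (vshl + vsri), and undo it with vshr + vsli. A small C sketch of the y-half of the Speck round these lines implement, fixed to 64-bit words for brevity (the \n parameter in the asm also covers 32-bit words):

    #include <stdint.h>

    /* vshl TMP2, Y2, #3  then  vsri TMP2, Y2, #(n-3)  =>  TMP2 = rol(y, 3) */
    static inline uint64_t rol3(uint64_t y) { return (y << 3) | (y >> 61); }
    static inline uint64_t ror3(uint64_t y) { return (y >> 3) | (y << 61); }

    /* Forward:  veor Y2, TMP2, X2  =>  y = rol(y, 3) ^ x */
    static inline uint64_t speck_y_fwd(uint64_t x, uint64_t y)
    {
        return rol3(y) ^ x;
    }

    /* Inverse:  veor TMP2, Y2, X2; vshr + vsli  =>  y = ror(y ^ x, 3) */
    static inline uint64_t speck_y_inv(uint64_t x, uint64_t y)
    {
        return ror3(y ^ x);
    }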
/arch/arm64/crypto/
D | speck-neon-core.S
     44  TMP2 .req v12
    109      shl       TMP2.\lanes, Y_2.\lanes, #3
    113      sri       TMP2.\lanes, Y_2.\lanes, #(\n - 3)
    119      eor       Y_2.16b, TMP2.16b, X_2.16b
    133      eor       TMP2.16b, Y_2.16b, X_2.16b
    139      ushr      Y_2.\lanes, TMP2.\lanes, #3
    143      sli       Y_2.\lanes, TMP2.\lanes, #(\n - 3)
    265      eor       TMP2.16b, X_2.16b, TWEAKV4.16b
    280      uzp2      X_2.\lanes, TMP2.\lanes, Y_2.\lanes
    281      uzp1      Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
    [all …]
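The arm64 version rebuilds the same rotate trick (shl + sri, ushr + sli), and additionally uses TMP2 for an XTS-tweaked block (the eor with TWEAKV4) feeding the uzp1/uzp2 de-interleave that separates alternating lanes back into the X and Y vectors. A sketch of the uzp1/uzp2 lane semantics, fixed to 32-bit lanes for illustration (the \lanes suffix in the asm also allows 64-bit lanes):

    #include <stdint.h>

    #define LANES 4   /* lanes per 128-bit vector at 32-bit lane width */

    /* uzp1 d, n, m : d = even-indexed lanes of n, then even-indexed lanes of m
     * uzp2 d, n, m : the same with odd-indexed lanes */
    static void uzp(uint32_t even[LANES], uint32_t odd[LANES],
                    const uint32_t n[LANES], const uint32_t m[LANES])
    {
        for (int i = 0; i < LANES / 2; i++) {
            even[i]             = n[2 * i];       /* uzp1, first source  */
            even[LANES / 2 + i] = m[2 * i];       /* uzp1, second source */
            odd[i]              = n[2 * i + 1];   /* uzp2, first source  */
            odd[LANES / 2 + i]  = m[2 * i + 1];   /* uzp2, second source */
        }
    }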
/arch/sparc/kernel/
D | sun4v_tlb_miss.S
     35  #define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \   (macro argument)
     37      mov  512, TMP2; \
     39      sllx TMP2, TMP1, TMP2; \
     41      sub  TMP2, 1, TMP2; \
     42      and  TMP1, TMP2, TMP1; \
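From the visible lines, TMP2 builds the TSB index mask: 512 entries scaled left by a size value (sllx), minus one, then ANDed against the hashed virtual address in TMP1. A hedged C sketch; the field layout (a size field in the pointer's low bits, 16-byte entries) is an assumption, not visible in these hits:

    #include <stdint.h>

    static uint64_t compute_tsb_ptr(uint64_t tsb_base, uint64_t vaddr,
                                    unsigned int hash_shift)
    {
        uint64_t size  = tsb_base & 0x7;     /* assumed size-field encoding   */
        uint64_t base  = tsb_base & ~0x7UL;
        uint64_t nent  = 512UL << size;      /* mov 512, TMP2; sllx ...       */
        uint64_t mask  = nent - 1;           /* sub TMP2, 1, TMP2             */
        uint64_t index = (vaddr >> hash_shift) & mask;   /* and TMP1, TMP2    */

        return base + index * 16;            /* assumed 16-byte TSB entries   */
    }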
/arch/sparc/crypto/
D | camellia_asm.S
     76  #define ROTL128(S01, S23, TMP1, TMP2, N) \   (macro argument)
     79      srlx S23, (64 - N), TMP2; \
     81      or   S01, TMP2, S01; \
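Here TMP2 catches the bits of S23 that wrap into S01 during a 128-bit rotate-left spread across two 64-bit registers. A minimal C equivalent of what the visible lines suggest (the symmetric S23 half is an assumption), valid for 0 < N < 64 since a 64-bit shift by 0 or 64 is undefined in C:

    #include <stdint.h>

    /* 128-bit rotate-left by n over a two-word state S01:S23. */
    static void rotl128(uint64_t *s01, uint64_t *s23, unsigned int n)
    {
        uint64_t tmp1 = *s01 >> (64 - n);   /* bits of S01 that wrap into S23 */
        uint64_t tmp2 = *s23 >> (64 - n);   /* srlx S23, (64 - N), TMP2       */

        *s01 = (*s01 << n) | tmp2;          /* or S01, TMP2, S01              */
        *s23 = (*s23 << n) | tmp1;
    }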