/arch/mips/crypto/

poly1305-mips.pl
     76  my ($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1);
    131  andi  $tmp0,$inp,7          # $inp % 8
    132  dsubu $inp,$inp,$tmp0       # align $inp
    133  sll   $tmp0,$tmp0,3         # byte to bit offset
    136  beqz  $tmp0,.Laligned_key
    139  subu  $tmp1,$zero,$tmp0
    141  dsllv $in0,$in0,$tmp0
    143  dsllv $in1,$in1,$tmp0
    146  dsrlv $in0,$in0,$tmp0
    148  dsrlv $in1,$in1,$tmp0
    [all …]
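The poly1305-mips.pl lines above handle a key pointer that may not be 8-byte aligned: the offset is masked off (andi), the pointer is rounded down (dsubu), the offset is converted to a bit count (sll), and the loaded doublewords are shifted (dsllv/dsrlv) to splice the value back together. A minimal C sketch of the same idea for a little-endian build; load_unaligned_le64 is a name made up for this sketch, not the kernel's helper:

    #include <stdint.h>

    /* Read an unaligned little-endian u64 using only aligned 8-byte loads,
     * mirroring the andi/dsubu/sll/dsllv/dsrlv sequence above.  Like the
     * assembly, it may touch bytes of the two aligned words beyond the
     * value itself. */
    static uint64_t load_unaligned_le64(const unsigned char *p)
    {
        uintptr_t off = (uintptr_t)p & 7;                          /* $inp % 8      */
        const uint64_t *q = (const uint64_t *)((uintptr_t)p - off); /* align $inp   */

        if (off == 0)                                              /* .Laligned_key */
            return q[0];

        unsigned int shift = (unsigned int)off * 8;                /* byte -> bit   */
        return (q[0] >> shift) | (q[1] << (64 - shift));           /* splice halves */
    }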
/arch/x86/crypto/
sha512-avx-asm.S
     71  tmp0 = %rax                                  define
    128  mov  e_64, tmp0      # tmp = e
    130  RORQ tmp0, 23        # 41   # tmp = e ror 23
    132  xor  e_64, tmp0      # tmp = (e ror 23) ^ e
    136  RORQ tmp0, 4         # 18   # tmp = ((e ror 23) ^ e) ror 4
    137  xor  e_64, tmp0      # tmp = (((e ror 23) ^ e) ror 4) ^ e
    140  RORQ tmp0, 14        # 14   # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e)
    141  add  tmp0, T1        # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    142  mov  a_64, tmp0      # tmp = a
    144  and  c_64, tmp0      # tmp = a & c
    [all …]

sha512-ssse3-asm.S
     71  tmp0 = %rax                                  define
    122  mov  e_64, tmp0      # tmp = e
    124  ror  $23, tmp0       # 41   # tmp = e ror 23
    126  xor  e_64, tmp0      # tmp = (e ror 23) ^ e
    130  ror  $4, tmp0        # 18   # tmp = ((e ror 23) ^ e) ror 4
    131  xor  e_64, tmp0      # tmp = (((e ror 23) ^ e) ror 4) ^ e
    134  ror  $14, tmp0       # 14   # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e)
    135  add  tmp0, T1        # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    136  mov  a_64, tmp0      # tmp = a
    138  and  c_64, tmp0      # tmp = a & c
    [all …]
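Both SHA-512 listings compute the round function's S1(e) = (e ror 14) ^ (e ror 18) ^ (e ror 41) with a single scratch register by nesting the rotates; each intermediate distance is a difference of the final ones (41-18 = 23, 18-14 = 4), and the AVX file only differs in using the RORQ macro instead of a plain ror. An illustrative C sketch of the identity, not taken from the kernel sources:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned int n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* Textbook definition of SHA-512 Sigma1. */
    static uint64_t S1_ref(uint64_t e)
    {
        return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
    }

    /* The nested form used in the assembly: three rotates and two xors on
     * one temporary produce the same three-way rotate-xor. */
    static uint64_t S1_nested(uint64_t e)
    {
        uint64_t tmp = e;
        tmp = ror64(tmp, 23) ^ e;   /* (e ror 23) ^ e                    */
        tmp = ror64(tmp, 4) ^ e;    /* (((e ror 23) ^ e) ror 4) ^ e      */
        return ror64(tmp, 14);      /* ror 14 distributes over the xors  */
    }

The last two matches in each file (mov a_64 / and c_64) begin the Maj(a,b,c) term of the same round once S1(e) has been folded into T1.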
glue_helper-asm-avx2.S
    106  #define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \        argument
    107          vpsrad $31, iv, tmp0; \
    110          vpshufd $0x13, tmp0, tmp0; \
    112          vpand mask2, tmp0, tmp0; \
    114          vpxor tmp0, iv, iv; \
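gf128mul_x2_ble multiplies a 128-bit XTS/LRW tweak by x^2 in GF(2^128) (little-endian block order): vpsrad $31 turns the top bits into lane-wide masks, vpshufd/vpand shape that into a conditional reduction constant, and vpxor folds it into the doubled tweak (the doubling shifts sit in the lines the excerpt elides). A scalar sketch of the single multiply-by-x step, assuming the usual XTS reduction polynomial; applying it twice is the x^2 update that the AVX2 macro performs on two tweaks per register:

    #include <stdint.h>

    /* 128-bit tweak, least-significant 64 bits first (the "ble" layout is
     * an assumption made for this sketch). */
    struct tweak128 {
        uint64_t lo;
        uint64_t hi;
    };

    /* Multiply the tweak by x modulo x^128 + x^7 + x^2 + x + 1. */
    static void gf128mul_x_ble_sketch(struct tweak128 *t)
    {
        uint64_t carry = t->hi >> 63;           /* bit shifted out of the top */

        t->hi = (t->hi << 1) | (t->lo >> 63);
        t->lo = (t->lo << 1) ^ (carry * 0x87);  /* conditional reduction      */
    }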
camellia-aesni-avx2-asm_64.S
     25  #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \           argument
     26          vpand x, mask4bit, tmp0; \
     30          vpshufb tmp0, lo_t, tmp0; \
     32          vpxor tmp0, x, x;
   1212  #define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \        argument
   1213          vpsrad $31, iv, tmp0; \
   1216          vpshufd $0x13, tmp0, tmp0; \
   1218          vpand mask2, tmp0, tmp0; \
   1220          vpxor tmp0, iv, iv; \

camellia-aesni-avx-asm_64.S
     34  #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \           argument
     35          vpand x, mask4bit, tmp0; \
     39          vpshufb tmp0, lo_t, tmp0; \
     41          vpxor tmp0, x, x;
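In both Camellia files, filter_8bit applies a byte-wise transform by splitting every byte into two nibbles, looking each nibble up in a 16-entry vpshufb table (lo_t and hi_t), and xoring the results; the vpandn/vpsrld steps that extract the high nibble are among the elided lines. This only works for transforms that decompose into such a nibble pair, e.g. affine byte transforms. A per-byte C sketch of the trick; the second AVX2 match is the same gf128mul_x2_ble macro sketched above:

    #include <stdint.h>

    /* Emulate an 8-bit lookup with two 16-entry nibble tables, the same
     * split that lets vpshufb act as sixteen parallel 4-bit lookups.
     * Valid only when f(x) == lo_t[x & 15] ^ hi_t[x >> 4]. */
    static uint8_t filter_8bit_sketch(uint8_t x,
                                      const uint8_t lo_t[16],
                                      const uint8_t hi_t[16])
    {
        uint8_t lo = x & 0x0f;          /* vpand with mask4bit          */
        uint8_t hi = x >> 4;            /* vpandn + vpsrld $4 (elided)  */

        return lo_t[lo] ^ hi_t[hi];     /* two vpshufb, then vpxor      */
    }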
/arch/arm/include/asm/
uaccess-asm.h
     86  .macro  uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
     90  DACR(   mrc p15, 0, \tmp0, c3, c0, 0)
     91  DACR(   str \tmp0, [sp, #SVC_DACR])
    100          bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
    108  .macro  uaccess_exit, tsk, tmp0, tmp1
    110  DACR(   ldr \tmp0, [sp, #SVC_DACR])
    112  DACR(   mcr p15, 0, \tmp0, c3, c0, 0)
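uaccess_entry saves the ARM Domain Access Control Register into the SVC frame and re-programs the DOMAIN_KERNEL field before the kernel touches user space; uaccess_exit restores the saved value. The DACR() wrapper suggests these instructions are only emitted on configurations that manage domains in software. A rough C rendering of the flow; read_dacr(), write_dacr() and the *_sketch helpers are names invented for this sketch, not the kernel's API, and the orr that pairs with the bic is among the elided lines:

    /* Illustrative only: CP15 c3 is the Domain Access Control Register. */
    static inline unsigned long read_dacr(void)
    {
        unsigned long dacr;

        asm volatile("mrc p15, 0, %0, c3, c0, 0" : "=r" (dacr));
        return dacr;
    }

    static inline void write_dacr(unsigned long dacr)
    {
        asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr));
    }

    struct saved_uaccess {
        unsigned long dacr;
    };

    static inline void uaccess_entry_sketch(struct saved_uaccess *s,
                                            unsigned long kernel_field_mask,
                                            unsigned long kernel_field_new)
    {
        s->dacr = read_dacr();      /* mrc + str \tmp0, [sp, #SVC_DACR]       */
        /* bic clears the DOMAIN_KERNEL field, an elided orr sets the new
         * access type, and the result is written back.                       */
        write_dacr((s->dacr & ~kernel_field_mask) | kernel_field_new);
    }

    static inline void uaccess_exit_sketch(const struct saved_uaccess *s)
    {
        write_dacr(s->dacr);        /* ldr from the frame + mcr               */
    }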
/arch/arm/crypto/
sha1-armv7-neon.S
     67  #define tmp0 q8                              macro
    156          vadd.u32 tmp0, W0, curK; \
    162          vst1.32 {tmp0, tmp1}, [RWK]!; \
    179          vadd.u32 tmp0, W0, curK; \
    197          vst1.32 {tmp0, tmp1}, [RWK]!; \
    209          veor tmp0, tmp0; \
    214          vext.8 tmp0, W_m04, tmp0, #4; \
    217          veor tmp0, tmp0, W_m16; \
    222          veor W, W, tmp0; \
    225          vshl.u32 tmp0, W, #1; \
    [all …]
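The SHA-1 macros precompute the message schedule four words at a time and store W+K through RWK for the rounds to consume; the veor tmp0, tmp0 zeroing and the vext.8 #4 handle the W[t-3] term, which overlaps the group of four words currently being produced. A scalar C sketch of the recurrence being vectorised (the W+K precomputation and the four-at-a-time blocking are left out):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* SHA-1 message schedule: each new word is a rotate-left-by-1 of the
     * xor of four earlier words.  W[0..15] is the message block. */
    static void sha1_schedule_sketch(uint32_t W[80])
    {
        for (int t = 16; t < 80; t++)
            W[t] = rol32(W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16], 1);
    }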
/arch/arm64/crypto/
aes-neon.S
    158  .macro  mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
    159          sshr \tmp0\().16b, \in0\().16b, #7
    162          and  \tmp0\().16b, \tmp0\().16b, \const\().16b
    165          eor  \out0\().16b, \out0\().16b, \tmp0\().16b
    169  .macro  mul_by_x2_2x, out0, out1, in0, in1, tmp0, tmp1, const
    170          ushr \tmp0\().16b, \in0\().16b, #6
    173          pmul \tmp0\().16b, \tmp0\().16b, \const\().16b
    176          eor  \out0\().16b, \out0\().16b, \tmp0\().16b
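mul_by_x_2x doubles every byte of two vectors in GF(2^8): sshr #7 smears each byte's top bit into a mask, and with const selects the reduction constant, and eor applies it after the left shift (which is in the elided lines). mul_by_x2_2x multiplies by x^2 in one step, using the two bits shifted out of the top (ushr #6) and a carry-less multiply (pmul) by the reduction byte. A per-byte C sketch, assuming const holds the usual AES reduction byte 0x1b:

    #include <stdint.h>

    /* Multiply a GF(2^8) element by x (the AES "xtime" step). */
    static uint8_t mul_by_x_sketch(uint8_t b)
    {
        return (uint8_t)((b << 1) ^ ((b >> 7) * 0x1b));
    }

    /* Multiply by x^2 directly: the two bits that overflow the byte select
     * how much of the reduction polynomial to fold back in, which is what
     * the pmul by 0x1b computes for a whole vector at once. */
    static uint8_t mul_by_x2_sketch(uint8_t b)
    {
        uint8_t top = b >> 6;
        uint8_t fix = (uint8_t)(((top & 1) * 0x1b) ^ ((top >> 1) * 0x36));

        return (uint8_t)((b << 2) ^ fix);
    }

Applying mul_by_x_sketch twice gives the same result as mul_by_x2_sketch for every byte value.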
/arch/arm64/include/asm/
assembler.h
    375  .macro  tcr_compute_pa_size, tcr, pos, tmp0, tmp1
    376          mrs  \tmp0, ID_AA64MMFR0_EL1
    378          ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
    380          cmp  \tmp0, \tmp1
    381          csel \tmp0, \tmp1, \tmp0, hi
    382          bfi  \tcr, \tmp0, \pos, #3
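tcr_compute_pa_size reads the supported physical-address-range field out of ID_AA64MMFR0_EL1, clamps it to the ceiling passed in tmp1 (cmp + csel ... hi), and inserts the 3-bit result into the TCR image at the given field position (bfi). A C rendering of the same steps, taking the register value and field parameters as plain arguments instead of reading the system register; the function name is made up for this sketch:

    #include <stdint.h>

    /* Clamp the PARange field from ID_AA64MMFR0_EL1 and insert it into a
     * 3-bit field of a TCR image at bit position pos. */
    static uint64_t tcr_set_pa_size_sketch(uint64_t tcr, unsigned int pos,
                                           uint64_t mmfr0,
                                           unsigned int parange_shift,
                                           uint64_t parange_max)
    {
        uint64_t parange = (mmfr0 >> parange_shift) & 0x7;  /* ubfx ..., #3   */

        if (parange > parange_max)                          /* cmp + csel hi  */
            parange = parange_max;

        tcr &= ~(0x7ULL << pos);                            /* bfi clears ... */
        tcr |= parange << pos;                              /* ... and inserts */
        return tcr;
    }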
/arch/xtensa/kernel/
setup.c
    451          unsigned long tmp0, tmp1, tmp2, tmp3;          in cpu_reset() local
    531          : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),       in cpu_reset()
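The two setup.c matches are just the declaration of tmp0-tmp3 and their use as outputs of the cpu_reset() asm statement: the locals carry no data, they only make the compiler set aside Xtensa general registers ("a" constraint) that the assembly may clobber, with "&" marking them early-clobber so they cannot share a register with any input. A generic sketch of that pattern, written with x86-64 mnemonics purely for concreteness:

    /* Scratch registers via early-clobber outputs: "=&r" tells the compiler
     * the register is written before all inputs are consumed, so it must
     * not overlap an input register.  cpu_reset() does the same with "=&a". */
    static unsigned long scratch_example(unsigned long x)
    {
        unsigned long tmp0, tmp1, out;

        asm volatile("mov %3, %0\n\t"       /* tmp0 = x            */
                     "lea (%0,%0), %1\n\t"  /* tmp1 = tmp0 + tmp0  */
                     "mov %1, %2\n\t"       /* out  = tmp1         */
                     : "=&r"(tmp0), "=&r"(tmp1), "=r"(out)
                     : "r"(x));
        return out;
    }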