Searched refs: T1 (Results 1 – 25 of 28) sorted by relevance

/arch/x86/crypto/
aesni-intel_avx-x86_64.S
291 .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
298 vpclmulqdq $0x11, \HK, \GH, \T1 # T1 = a1*b1
302 vpxor \T1, \T2,\T2 # T2 = a0*b1+a1*b0
307 vpxor \T2, \T1, \T1 # <T1:GH> = GH x HK
332 vpxor \T1, \GH, \GH # the result is in GH
337 .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
342 vpshufd $0b01001110, \T5, \T1
343 vpxor \T5, \T1, \T1
344 vmovdqa \T1, HashKey_k(arg1)
346 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
[all …]
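What GHASH_MUL_AVX computes up to this point is a plain schoolbook decomposition of a 128x128-bit carry-less multiply into 64-bit halves. A minimal C sketch of that decomposition, assuming nothing beyond the excerpt's comments; clmul64 and gf128_clmul are placeholder names standing in for vpclmulqdq, and the final reduction that leaves the result in GH is omitted:

    #include <stdint.h>

    /* Carry-less 64x64 -> 128-bit multiply; a plain-C stand-in for
     * one vpclmulqdq invocation. */
    static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
    {
        uint64_t h = 0, l = 0;
        for (int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                l ^= a << i;
                if (i)
                    h ^= a >> (64 - i);
            }
        }
        *hi = h;
        *lo = l;
    }

    /* The decomposition the excerpt's comments describe:
     * T1 = a1*b1, a0*b0, and the cross terms a0*b1 ^ a1*b0. */
    static void gf128_clmul(const uint64_t a[2], const uint64_t b[2],
                            uint64_t r[4])
    {
        uint64_t hh, hl, lh, ll, m1h, m1l, m2h, m2l;

        clmul64(a[1], b[1], &hh, &hl);   /* T1 = a1*b1          */
        clmul64(a[0], b[0], &lh, &ll);   /*      a0*b0          */
        clmul64(a[0], b[1], &m1h, &m1l); /* a0*b1 } T2 = sum of */
        clmul64(a[1], b[0], &m2h, &m2l); /* a1*b0 } cross terms */

        m1h ^= m2h;                      /* T2 = a0*b1 ^ a1*b0  */
        m1l ^= m2l;

        r[0] = ll;                       /* 256-bit product,    */
        r[1] = lh ^ m1l;                 /* least word first    */
        r[2] = hl ^ m1h;
        r[3] = hh;
    }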
sha256-avx2-asm.S
110 T1 = %r12d define
167 rorx $13, a, T1 # T1 = a >> 13 # S0B
181 xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
182 rorx $2, a, T1 # T1 = (a >> 2) # S0
186 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
187 mov a, T1 # T1 = a # MAJB
188 and c, T1 # T1 = a&c # MAJB
192 or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
217 rorx $13, a, T1 # T1 = a >> 13 # S0B
230 xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
[all …]
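Note that rorx is a rotate, so the ">>" in the excerpt's comments is shorthand for rotation. What the rorx/xor chain (y1) and the or/and sequence (y3) compute, sketched in C with my own names:

    #include <stdint.h>

    static inline uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* y1 in the excerpt: Sigma0(a) = ROTR(a,2) ^ ROTR(a,13) ^ ROTR(a,22) */
    static inline uint32_t Sigma0(uint32_t a)
    {
        return rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
    }

    /* y3 in the excerpt: Maj(a,b,c) folded into ((a|c) & b) | (a & c) */
    static inline uint32_t Maj(uint32_t a, uint32_t b, uint32_t c)
    {
        return ((a | c) & b) | (a & c);
    }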
sha512-avx2-asm.S
95 T1 = %r12 # clobbers CTX2 define
192 rorx $34, a, T1 # T1 = a >> 34 # S0B
204 xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
205 rorx $28, a, T1 # T1 = (a >> 28) # S0
208 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
209 mov a, T1 # T1 = a # MAJB
210 and c, T1 # T1 = a&c # MAJB
213 or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
256 rorx $34, a, T1 # T1 = a >> 34 # S0B
268 xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
[all …]
ghash-clmulni-intel_asm.S
30 #define T1 %xmm2 macro
51 movaps DATA, T1
58 PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1
61 pxor T1, T2 # T2 = a0 * b1 + a1 * b0
67 pxor T2, T1 # <T1:DATA> is result of
81 pxor T3, T1
90 pxor T2, T1
91 pxor T1, DATA
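This file also shows the step the first excerpt stopped short of: the pxor/shift lines after the multiply reduce the 256-bit product back to 128 bits. As a slow but self-checking reference for the combined multiply-and-reduce, a bitwise GF(2^128) multiplication in C; gf128_mul_ref is a hypothetical name, and it uses the plain bit order, whereas GCM's reflected convention changes the shift constants but not the idea:

    #include <stdint.h>

    /* Reference GF(2^128) multiply, reduced bit by bit modulo
     * g(x) = x^128 + x^7 + x^2 + x + 1. */
    static void gf128_mul_ref(const uint64_t a[2], const uint64_t b[2],
                              uint64_t r[2])
    {
        uint64_t v0 = a[0], v1 = a[1];  /* v = a * x^i */
        uint64_t r0 = 0, r1 = 0;

        for (int i = 0; i < 128; i++) {
            if ((b[i / 64] >> (i % 64)) & 1) {
                r0 ^= v0;
                r1 ^= v1;
            }
            /* v <<= 1; a bit falling out as x^128 reduces to
             * x^7 + x^2 + x + 1, i.e. the constant 0x87 */
            uint64_t carry = v1 >> 63;
            v1 = (v1 << 1) | (v0 >> 63);
            v0 <<= 1;
            if (carry)
                v0 ^= 0x87;
        }
        r[0] = r0;
        r[1] = r1;
    }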
sha512-avx-asm.S
62 T1 = %rcx define
128 mov f_64, T1 # T1 = f
130 xor g_64, T1 # T1 = f ^ g
132 and e_64, T1 # T1 = (f ^ g) & e
134 xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g)
136 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
140 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
142 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
151 add T1, d_64 # e(next_state) = d + T1
154 lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c)
[all …]
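The excerpt's mov/xor/and/xor prologue is the classic Ch trick: ((f ^ g) & e) ^ g needs one and and two xors instead of the three operations of (e & f) ^ (~e & g). A sketch of one full SHA-512 round in C using that trick; function and variable names are mine, and the assembly schedules these operations differently:

    #include <stdint.h>

    static inline uint64_t rotr64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)
    {
        return ((f ^ g) & e) ^ g;   /* == (e & f) ^ (~e & g) */
    }

    static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c)
    {
        return (a & b) ^ (a & c) ^ (b & c);
    }

    static inline uint64_t S0(uint64_t a)
    {
        return rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
    }

    static inline uint64_t S1(uint64_t e)
    {
        return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
    }

    /* One round; wk is W[t] + K[t] from the message scheduler. */
    static void sha512_round(uint64_t s[8], uint64_t wk)
    {
        uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint64_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint64_t t1 = Ch(e, f, g) + wk + h + S1(e);
        uint64_t t2 = S0(a) + Maj(a, b, c);

        s[7] = g; s[6] = f; s[5] = e;
        s[4] = d + t1;              /* e(next_state) = d + T1  */
        s[3] = c; s[2] = b; s[1] = a;
        s[0] = t1 + t2;             /* a(next_state) = T1 + T2 */
    }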
sha512-ssse3-asm.S
61 T1 = %rcx define
121 mov f_64, T1 # T1 = f
123 xor g_64, T1 # T1 = f ^ g
125 and e_64, T1 # T1 = (f ^ g) & e
127 xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g)
129 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
133 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
135 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
144 add T1, d_64 # e(next_state) = d + T1
147 lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c)
[all …]
sha1_ssse3_asm.S
200 .set T1, REG_T1 define
219 mov \c, T1
220 SWAP_REG_NAMES \c, T1
221 xor \d, T1
222 and \b, T1
223 xor \d, T1
227 mov \d, T1
228 SWAP_REG_NAMES \d, T1
229 xor \c, T1
230 xor \b, T1
[all …]
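The two sequences above are SHA-1's cheap round functions: the first builds the select function F1 with the same xor/and/xor trick seen in the SHA-512 code, the second is plain parity (F2, also reused as F4). In C, with hypothetical names:

    #include <stdint.h>

    /* F1 selects c or d per bit of b:
     * ((c ^ d) & b) ^ d == (b & c) | (~b & d) */
    static inline uint32_t sha1_f1(uint32_t b, uint32_t c, uint32_t d)
    {
        return ((c ^ d) & b) ^ d;
    }

    /* F2 (rounds 20-39) and F4 (rounds 60-79) are plain parity. */
    static inline uint32_t sha1_f2(uint32_t b, uint32_t c, uint32_t d)
    {
        return b ^ c ^ d;
    }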
sha1_avx2_x86_64_asm.S
117 .set T1, REG_T1 define
360 andn D, TB, T1
362 xor T1, TB
385 andn C, A, T1 /* ~b&d */
398 xor T1, A /* F1 = (b&c) ^ (~b&d) */
431 mov B, T1
432 or A, T1
440 and C, T1
442 or T1, A
twofish-x86_64-asm_64-3way.S
92 #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ argument
97 op2##l T1(CTX, tmp1, 4), dst ## d;
camellia-x86_64-asm_64.S
94 #define xor2ror16(T0, T1, tmp1, tmp2, ab, dst) \ argument
99 xorq T1(, tmp1, 8), dst;
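Both macros follow the same pattern: peel one byte off the source word and fold a table entry into the destination, with the addressing scale (4 vs 8) matching the entry width. A hypothetical C rendering of one such step; sbox_fold and tab are placeholders, not kernel identifiers:

    #include <stdint.h>

    /* Fold table entry tab[byte n of src] into dst, as in
     * "xorq T1(, tmp1, 8), dst" once tmp1 holds the extracted byte. */
    static inline uint64_t sbox_fold(uint64_t dst, const uint64_t *tab,
                                     uint64_t src, unsigned n)
    {
        return dst ^ tab[(src >> (8 * n)) & 0xff];
    }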
/arch/arm/crypto/
ghash-ce-core.S
15 T1 .req q1
136 vmull.p64 T1, XL_L, MASK
139 vext.8 T1, T1, T1, #8
141 veor T1, T1, XL
154 vshl.i64 T1, XL, #57
156 veor T1, T1, T2
158 veor T1, T1, T2
162 vshr.u64 T1, XL, #1
164 veor XL, XL, T1
165 vshr.u64 T1, T1, #6
[all …]
sha256-armv4.pl
46 $T1="r3"; $t3="r3";
286 my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
310 &vext_8 ($T1,@X[2],@X[3],4); # X[9..12]
317 &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += X[9..12]
320 &vshr_u32 ($T1,$T0,$sigma0[2]);
329 &veor ($T1,$T1,$T2);
338 &veor ($T1,$T1,$T3); # sigma0(X[1..4])
347 &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
478 vld1.32 {$T1},[$Ktbl,:128]!
491 vadd.i32 $T1,$T1,@X[1]
[all …]
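The NEON code above vectorizes the SHA-256 message schedule, producing X[0..3] at once from sigma0(X[1..4]) and X[9..12]. The scalar recurrence being vectorized, sketched in C (sha256_w is my name for it):

    #include <stdint.h>

    static inline uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static inline uint32_t sigma0(uint32_t x)
    {
        return rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3);
    }

    static inline uint32_t sigma1(uint32_t x)
    {
        return rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10);
    }

    /* W[t] for t >= 16; X[9..12] in the excerpt is the W[t-7]
     * term for four consecutive values of t. */
    static inline uint32_t sha256_w(const uint32_t W[], int t)
    {
        return sigma1(W[t - 2]) + W[t - 7] + sigma0(W[t - 15]) + W[t - 16];
    }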
nh-neon-core.S
39 T1 .req q9
59 vadd.u32 T1, T3, \k1
114 vst1.8 {T0-T1}, [HASH]
sha512-armv4.pl
501 my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31)); # temps
527 vadd.i64 $T1,$Ch,$h
530 vadd.i64 $T1,$t2
537 vadd.i64 $T1,$K
540 vadd.i64 $d,$T1
541 vadd.i64 $Maj,$T1
/arch/arm64/crypto/
ghash-ce-core.S
16 T1 .req v2
150 movi T1.8b, #8
152 eor perm1.16b, perm1.16b, T1.16b
155 ushr T1.2d, perm1.2d, #24
158 sli T1.2d, perm1.2d, #40
164 tbl sh4.16b, {SHASH.16b}, T1.16b
177 eor XM.16b, XM.16b, T1.16b
192 eor XM.16b, XM.16b, T1.16b
197 shl T1.2d, XL.2d, #57
199 eor T2.16b, T2.16b, T1.16b
[all …]
sha512-armv8.pl
101 my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
156 ror $T1,@X[($j+1)&15],#$sigma0[0]
163 eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
171 eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1])
180 add @X[$j],@X[$j],$T1
455 my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
487 &ushr_32 ($T1,$T0,$sigma0[2]);
497 &eor_8 ($T1,$T1,$T2);
506 &eor_8 ($T1,$T1,$T3); # sigma0(X[1..4])
518 &add_32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
[all …]
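sha512-armv8.pl runs the same schedule for SHA-512; the ror/ror/lsr triple above is sigma0 with the 64-bit rotation counts. For reference, a sketch in C, with sigma1 added for completeness:

    #include <stdint.h>

    static inline uint64_t rotr64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* sigma0(X[i+1]) as computed by the eor/ror/lsr lines above */
    static inline uint64_t sigma0_512(uint64_t x)
    {
        return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7);
    }

    static inline uint64_t sigma1_512(uint64_t x)
    {
        return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6);
    }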
/arch/sparc/crypto/
aes_asm.S
7 #define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \ argument
9 AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
10 AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
11 AES_EROUND23(KEY_BASE + 6, T0, T1, I1)
13 #define ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \ argument
15 AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
18 AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
19 AES_EROUND23(KEY_BASE + 6, T0, T1, I1) \
23 #define ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \ argument
25 AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
[all …]
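The SPARC macros ping-pong the state between the I and T register pairs so each round's output feeds the next round without extra moves. A hypothetical C sketch of that dataflow only; eround is a stub standing in for the AES_EROUND01/23 hardware opcodes, not an AES round:

    #include <stdint.h>

    /* Stub stand-in for one AES_EROUND opcode: combines two state
     * halves with round-key material. Only the dataflow matters here. */
    static uint64_t eround(const uint64_t *key, uint64_t x, uint64_t y)
    {
        return key[0] ^ x ^ y;  /* not the real AES round function */
    }

    static void encrypt_two_rounds(const uint64_t *k,
                                   uint64_t *i0, uint64_t *i1)
    {
        uint64_t t0 = eround(k + 0, *i0, *i1);  /* round n -> temps  */
        uint64_t t1 = eround(k + 2, *i0, *i1);
        *i0 = eround(k + 4, t0, t1);            /* round n+1 -> back */
        *i1 = eround(k + 6, t0, t1);            /* into I0/I1        */
    }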
/arch/mips/mm/
page.c
46 #define T1 9 macro
477 build_copy_load(&buf, T1, off + copy_word_size); in build_copy_page()
485 build_copy_store(&buf, T1, off + copy_word_size); in build_copy_page()
499 build_copy_load(&buf, T1, off + copy_word_size); in build_copy_page()
507 build_copy_store(&buf, T1, off + copy_word_size); in build_copy_page()
524 build_copy_load(&buf, T1, off + copy_word_size); in build_copy_page()
530 build_copy_store(&buf, T1, off + copy_word_size); in build_copy_page()
542 build_copy_load(&buf, T1, off + copy_word_size); in build_copy_page()
548 build_copy_store(&buf, T1, off + copy_word_size); in build_copy_page()
566 build_copy_load(&buf, T1, off + copy_word_size); in build_copy_page()
[all …]
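page.c never names a machine register T1 directly: it JIT-assembles the copy loop with uasm, and T1 is simply pre-assigned MIPS register $9 ($t1). A hypothetical C equivalent of the load/store pairing that build_copy_load()/build_copy_store() emit, ignoring the real code's unrolling and cache prefetch ops:

    #include <stddef.h>

    /* Sketch only: T1 is the register carrying every second word. */
    static void copy_page_sketch(char *dst, const char *src, size_t size)
    {
        size_t off;

        for (off = 0; off < size; off += 2 * sizeof(long)) {
            long w0 = *(const long *)(src + off);                /* T0 */
            long w1 = *(const long *)(src + off + sizeof(long)); /* T1 */
            *(long *)(dst + off) = w0;
            *(long *)(dst + off + sizeof(long)) = w1;
        }
    }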
/arch/mips/kvm/
entry.c
32 #define T1 9 macro
39 #define T1 13 macro
347 uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT, in kvm_mips_build_enter_guest()
349 uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT, in kvm_mips_build_enter_guest()
365 UASM_i_ADDIU(&p, T1, S0, in kvm_mips_build_enter_guest()
375 UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, in kvm_mips_build_enter_guest()
378 UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, in kvm_mips_build_enter_guest()
388 UASM_i_ADDU(&p, T3, T1, T2); in kvm_mips_build_enter_guest()
414 (int)offsetof(struct mm_struct, context.asid), T1); in kvm_mips_build_enter_guest()
/arch/x86/crypto/sha1-mb/
sha1_x8_avx2.S
232 # ymm7 T1 CC
250 T1 = %ymm7 define
335 VMOVPS (inp1, IDX), T1
343 TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
346 vpshufb F, T1, T1
347 vmovdqu T1, (I*8+1)*32(%rsp)
/arch/m68k/fpsp040/
scosh.S
63 T1: .long 0x40C62D38,0xD3D64634 | ... 16381 LOG2 LEAD label
113 fsubd T1(%pc),%fp0 | ...(|X|-16381LOG2_LEAD)
ssinh.S
59 T1: .long 0x40C62D38,0xD3D64634 | ... 16381 LOG2 LEAD label
116 fsubd T1(%pc),%fp0 | ...(|X|-16381LOG2_LEAD)
/arch/x86/crypto/sha512-mb/
sha512_x4_avx2.S
104 T1 = %ymm14 define
190 vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
199 vpxor c, \_T1, \_T1 # maj: T1 = a^c
201 vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
311 ROUND_16_XX T1, i
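The multibuffer code computes Maj with the xor/and variant: T1 = (a ^ c) & b is later completed to ((a ^ c) & b) ^ (a & c), which equals the majority function, one operation cheaper than the textbook three-and form. In C:

    #include <stdint.h>

    static inline uint64_t maj_xor_and(uint64_t a, uint64_t b, uint64_t c)
    {
        return ((a ^ c) & b) ^ (a & c);  /* == (a&b) ^ (a&c) ^ (b&c) */
    }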
/arch/x86/crypto/sha256-mb/
sha256_x8_avx2.S
98 T1 = %ymm8 define
227 vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
235 vpxor c, a, \_T1 # maj: T1 = a^c
237 vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
381 ROUND_16_XX T1, i
/arch/metag/tbx/
tbictx.S
175 D SETD [A0.2++],T1
343 D GETD T1,[A0.2++]
