Searched full:t4 (Results 1 – 25 of 501) sorted by relevance

/kernel/linux/linux-6.6/arch/riscv/lib/
memmove.S 37 * Reverse Copy: t4 - Index counter of dest
54 add t4, a0, a2
69 andi t6, t4, -SZREG
163 sub a5, a4, t4 /* Find the difference between src and dest */
198 addi t4, t4, (-2 * SZREG)
202 REG_S t2, ( 1 * SZREG)(t4)
204 beq t4, a2, 2f
211 REG_S t2, ( 0 * SZREG)(t4)
213 bne t4, t5, 1b
215 mv t4, t5 /* Fix the dest pointer in case the loop was broken */
[all …]
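These matches are from the RISC-V memmove, where t4 is the destination index for the reverse (high-to-low) copy used when the buffers overlap. A minimal C sketch of that backward-copy strategy (the helper name is illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: copy backwards when dst overlaps src from above, the case
     * the "Reverse Copy: t4 - Index counter of dest" comment refers to. */
    static void *memmove_sketch(void *dst, const void *src, size_t n)
    {
        uint8_t *d = dst;
        const uint8_t *s = src;

        if (d > s && d < s + n) {
            while (n--)                    /* walk down from the end */
                d[n] = s[n];
        } else {
            for (size_t i = 0; i < n; i++) /* plain forward copy */
                d[i] = s[i];
        }
        return dst;
    }
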
/kernel/linux/linux-5.10/arch/alpha/lib/
ev67-strrchr.S 36 insbl a1, 1, t4 # U : 000000000000ch00
41 or t2, t4, a1 # E : 000000000000chch
48 sll a1, 48, t4 # U : chch000000000000
50 or t4, a1, a1 # E : chch00000000chch
56 mskqh t5, a0, t4 # E : Complete garbage mask
58 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
61 andnot t1, t4, t1 # E : clear garbage from null test
62 andnot t3, t4, t3 # E : clear garbage from char test
84 negq t1, t4 # E : isolate first null byte match
85 and t1, t4, t4 # E :
[all …]
strrchr.S 31 lda t4, -1 # .. e1 : build garbage mask
34 mskqh t4, a0, t4 # e0 :
37 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
39 andnot t1, t4, t1 # .. e1 : clear garbage from null test
40 andnot t3, t4, t3 # e0 : clear garbage from char test
56 negq t1, t4 # e0 : isolate first null byte match
57 and t1, t4, t4 # e1 :
58 subq t4, 1, t5 # e0 : build a mask of the bytes up to...
59 or t4, t5, t4 # e1 : ... and including the null
61 and t3, t4, t3 # e0 : mask out char matches after null
strchr.S 27 lda t4, -1 # .. e1 : build garbage mask
30 mskqh t4, a0, t4 # e0 :
33 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
38 andnot t0, t4, t0 # e0 : clear garbage bits
58 and t0, 0xaa, t4 # e0 :
61 cmovne t4, 1, t4 # .. e1 :
63 addq v0, t4, v0 # .. e1 :
ev67-strchr.S 40 lda t4, -1 # E : build garbage mask
42 mskqh t4, a0, t4 # U : only want relevant part of first quad
50 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
58 andnot t0, t4, t0 # E : clear garbage bits
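In these Alpha string routines, t4 holds a "garbage mask": the first load reads a whole aligned quadword, so cmpbge zero, t4, t4 flags the bytes that precede the real start of the string, and the andnot lines clear those bytes out of the null/char match bits. The per-byte zero test those masks protect is the classic SWAR trick, sketched here in C (function name is illustrative):

    #include <stdint.h>

    /* Nonzero iff some byte of x is 0x00; the word-at-a-time zero test
     * behind cmpbge-style byte scanning. */
    static uint64_t has_zero_byte(uint64_t x)
    {
        return (x - 0x0101010101010101ULL) & ~x & 0x8080808080808080ULL;
    }
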
/kernel/linux/linux-6.6/arch/alpha/lib/
ev67-strrchr.S 36 insbl a1, 1, t4 # U : 000000000000ch00
41 or t2, t4, a1 # E : 000000000000chch
48 sll a1, 48, t4 # U : chch000000000000
50 or t4, a1, a1 # E : chch00000000chch
56 mskqh t5, a0, t4 # E : Complete garbage mask
58 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
61 andnot t1, t4, t1 # E : clear garbage from null test
62 andnot t3, t4, t3 # E : clear garbage from char test
84 negq t1, t4 # E : isolate first null byte match
85 and t1, t4, t4 # E :
[all …]
strrchr.S 31 lda t4, -1 # .. e1 : build garbage mask
34 mskqh t4, a0, t4 # e0 :
37 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
39 andnot t1, t4, t1 # .. e1 : clear garbage from null test
40 andnot t3, t4, t3 # e0 : clear garbage from char test
56 negq t1, t4 # e0 : isolate first null byte match
57 and t1, t4, t4 # e1 :
58 subq t4, 1, t5 # e0 : build a mask of the bytes up to...
59 or t4, t5, t4 # e1 : ... and including the null
61 and t3, t4, t3 # e0 : mask out char matches after null
strchr.S 27 lda t4, -1 # .. e1 : build garbage mask
30 mskqh t4, a0, t4 # e0 :
33 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
38 andnot t0, t4, t0 # e0 : clear garbage bits
58 and t0, 0xaa, t4 # e0 :
61 cmovne t4, 1, t4 # .. e1 :
63 addq v0, t4, v0 # .. e1 :
ev67-strchr.S 40 lda t4, -1 # E : build garbage mask
42 mskqh t4, a0, t4 # U : only want relevant part of first quad
50 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
58 andnot t0, t4, t0 # E : clear garbage bits
/kernel/linux/linux-5.10/arch/x86/crypto/
aesni-intel_avx-x86_64.S 605 .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
621 \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
667 \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
895 .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
916 vpslld $25, \GH, \T4 # packed right shifting shift << 25
919 vpxor \T4, \T2, \T2
930 vpsrld $7,\GH, \T4 # packed left shifting >> 7
932 vpxor \T4, \T2, \T2
941 .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
950 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
[all …]
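Here T4 is a scratch register in GHASH_MUL_AVX, whose << 25 and >> 7 shifts are pieces of the reduction modulo the GHASH polynomial; CALC_AAD_HASH folds each AAD block into the hash as Y = (Y xor block) * H in GF(2^128). A bit-serial C sketch of that multiply, following the GCM specification (slow and illustrative only, not the kernel's code):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } be128;  /* 128-bit big-endian value */

    /* GF(2^128) multiply with the GCM polynomial x^128 + x^7 + x^2 + x + 1. */
    static be128 gf128_mul(be128 x, be128 y)
    {
        be128 z = { 0, 0 };

        for (int i = 0; i < 128; i++) {
            /* Take bit i of x, most-significant first. */
            uint64_t bit = (i < 64) ? (x.hi >> (63 - i)) & 1
                                    : (x.lo >> (127 - i)) & 1;
            if (bit) {
                z.hi ^= y.hi;
                z.lo ^= y.lo;
            }
            /* Shift y right one bit; on carry-out, xor in the polynomial. */
            uint64_t carry = y.lo & 1;
            y.lo = (y.lo >> 1) | (y.hi << 63);
            y.hi >>= 1;
            if (carry)
                y.hi ^= 0xE100000000000000ULL;
        }
        return z;
    }
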
poly1305-x86_64-cryptogams.pl 420 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
893 vpunpckhqdq $T1,$T0,$T4 # 4
897 vpsrlq \$40,$T4,$T4 # 4
905 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
986 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
989 vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
1012 vpmuludq $T4,$H4,$H0 # h4*s2
1023 vpmuludq $T4,$H3,$H0 # h4*s3
1035 vpmuludq $T4,$H4,$T4 # h4*s4
1038 vpaddq $T4,$D3,$D3 # d3 += h4*s4
[all …]
nh-avx2-x86_64.S 30 #define T4 %ymm12
48 vpshufd $0x10, T0, T4
56 vpmuludq T4, T0, T0
147 vinserti128 $0x1, T2_XMM, T0, T4 // T4 = (0A 1A 2A 3A)
152 vpaddq T5, T4, T4
154 vpaddq T4, T0, T0
glue_helper-asm-avx2.S 56 t1x, t2, t2x, t3, t3x, t4, t5) \
59 vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */\
70 add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \
72 add2_le128(t2, t0, t4, t3, t5); \
74 add2_le128(t2, t0, t4, t3, t5); \
76 add2_le128(t2, t0, t4, t3, t5); \
78 add2_le128(t2, t0, t4, t3, t5); \
80 add2_le128(t2, t0, t4, t3, t5); \
82 add2_le128(t2, t0, t4, t3, t5); \
camellia-aesni-avx2-asm_64.S 63 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
68 vbroadcasti128 .Linv_shift_row, t4; \
76 vpshufb t4, x0, x0; \
77 vpshufb t4, x7, x7; \
78 vpshufb t4, x3, x3; \
79 vpshufb t4, x6, x6; \
80 vpshufb t4, x2, x2; \
81 vpshufb t4, x5, x5; \
82 vpshufb t4, x1, x1; \
83 vpshufb t4, x4, x4; \
[all …]
/kernel/linux/linux-6.6/arch/x86/crypto/
aesni-intel_avx-x86_64.S 571 .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
587 \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
635 \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
863 .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
884 vpslld $25, \GH, \T4 # packed right shifting shift << 25
887 vpxor \T4, \T2, \T2
898 vpsrld $7,\GH, \T4 # packed left shifting >> 7
900 vpxor \T4, \T2, \T2
909 .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
918 GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
[all …]
poly1305-x86_64-cryptogams.pl 419 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
892 vpunpckhqdq $T1,$T0,$T4 # 4
896 vpsrlq \$40,$T4,$T4 # 4
904 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
985 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
988 vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
1011 vpmuludq $T4,$H4,$H0 # h4*s2
1022 vpmuludq $T4,$H3,$H0 # h4*s3
1034 vpmuludq $T4,$H4,$T4 # h4*s4
1037 vpaddq $T4,$D3,$D3 # d3 += h4*s4
[all …]
camellia-aesni-avx2-asm_64.S 62 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
67 vbroadcasti128 .Linv_shift_row(%rip), t4; \
75 vpshufb t4, x0, x0; \
76 vpshufb t4, x7, x7; \
77 vpshufb t4, x3, x3; \
78 vpshufb t4, x6, x6; \
79 vpshufb t4, x2, x2; \
80 vpshufb t4, x5, x5; \
81 vpshufb t4, x1, x1; \
82 vpshufb t4, x4, x4; \
[all …]
nh-avx2-x86_64.S 31 #define T4 %ymm12
49 vpshufd $0x10, T0, T4
57 vpmuludq T4, T0, T0
148 vinserti128 $0x1, T2_XMM, T0, T4 // T4 = (0A 1A 2A 3A)
153 vpaddq T5, T4, T4
155 vpaddq T4, T0, T0
/kernel/linux/linux-5.10/arch/loongarch/boot/compressed/
head.S 42 la.pcrel t4, decompress_kernel
43 jirl zero, t4, 0
49 PTR_LI t4, KERNEL_ENTRY
50 add.d t4, t4, a3
51 jirl zero, t4, 0
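In the LoongArch boot stub, t4 simply carries a computed jump target: the first hit jumps to decompress_kernel, the second adds a load offset (a3) to KERNEL_ENTRY and jumps via jirl zero, t4, 0, which saves no return address. In C terms this is a branch through a computed function pointer, roughly (all names here are placeholders):

    /* Sketch of "PTR_LI t4, KERNEL_ENTRY; add.d t4, t4, a3;
     * jirl zero, t4, 0": branch to a relocated entry point. */
    typedef void (*entry_fn)(void);

    static void jump_to_entry(unsigned long kernel_entry, unsigned long offset)
    {
        entry_fn entry = (entry_fn)(kernel_entry + offset);
        entry();  /* does not return */
    }
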
/kernel/linux/linux-6.6/arch/arm64/crypto/
crct10dif-ce-core.S 84 t4 .req v18
136 ext t4.8b, ad.8b, ad.8b, #1 // A1
140 pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B
150 tbl t4.16b, {ad.16b}, perm1.16b // A1
154 pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B
162 0: eor t4.16b, t4.16b, t8.16b // L = E + F
166 uzp1 t8.2d, t4.2d, t5.2d
167 uzp2 t4.2d, t4.2d, t5.2d
171 // t4 = (L) (P0 + P1) << 8
173 eor t8.16b, t8.16b, t4.16b
[all …]
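This CRC-T10DIF kernel splits wide polynomial multiplications across byte-wide pmull operations (A1*B and friends, with t4 holding one partial product before the eor/uzp recombination). The primitive underneath is a carry-less multiply: additions become xors and nothing carries. A one-lane C sketch (the NEON instruction computes eight such lanes at once):

    #include <stdint.h>

    /* Carry-less (polynomial) multiply of two bytes: what a single lane
     * of "pmull t4.8h, t4.8b, fold_consts.8b" computes. */
    static uint16_t clmul8(uint8_t a, uint8_t b)
    {
        uint16_t r = 0;

        for (int i = 0; i < 8; i++)
            if (b & (1u << i))
                r ^= (uint16_t)a << i;
        return r;
    }
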
sm4-ce-gcm-core.S 53 r4, r5, m4, m5, T4, T5, \
57 ext T4.16b, m5.16b, m5.16b, #8; \
65 pmull T5.1q, m4.1d, T4.1d; \
69 pmull2 T4.1q, m4.2d, T4.2d; \
77 eor T4.16b, T4.16b, T5.16b; \
81 ext T5.16b, RZERO.16b, T4.16b, #8; \
85 ext T4.16b, T4.16b, RZERO.16b, #8; \
93 eor r5.16b, r5.16b, T4.16b; \
136 r4, r5, m4, m5, T4, T5) \
142 ext T4.16b, m5.16b, m5.16b, #8; \
[all …]
/kernel/linux/linux-5.10/arch/arm64/crypto/
crct10dif-ce-core.S 84 t4 .req v18
136 ext t4.8b, ad.8b, ad.8b, #1 // A1
140 pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B
150 tbl t4.16b, {ad.16b}, perm1.16b // A1
154 pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B
162 0: eor t4.16b, t4.16b, t8.16b // L = E + F
166 uzp1 t8.2d, t4.2d, t5.2d
167 uzp2 t4.2d, t4.2d, t5.2d
171 // t4 = (L) (P0 + P1) << 8
173 eor t8.16b, t8.16b, t4.16b
[all …]
/kernel/linux/linux-5.10/arch/ia64/lib/
memcpy.S 36 # define t4 r22
190 sub t4=r0,dst // t4 = -dst
194 shl t4=t4,3 // t4 = 8*(dst & 7)
202 mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7)
225 mov t4=ip
228 adds t4=.memcpy_loops-1b,t4
243 add t4=t0,t4
255 mov b6=t4
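The ia64 memcpy uses t4 two ways: first to turn the low bits of the destination into predicate bits (the mov pr=t4,0x38 at line 202), then to build an IP-relative branch target (ip plus the .memcpy_loops offset plus t0) that selects one of several specialized copy loops via b6. A C analogue of that dispatch is a table of loop variants indexed by alignment (all names illustrative):

    /* Sketch: pick a copy-loop variant by index, standing in for
     * "add t4=t0,t4; mov b6=t4" followed by a branch through b6. */
    typedef void (*copy_loop_fn)(void *dst, const void *src, unsigned long n);

    static void dispatch_copy(copy_loop_fn loops[], unsigned int idx,
                              void *dst, const void *src, unsigned long n)
    {
        loops[idx](dst, src, n);
    }
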
/kernel/linux/linux-6.6/arch/ia64/lib/
memcpy.S 36 # define t4 r22
190 sub t4=r0,dst // t4 = -dst
194 shl t4=t4,3 // t4 = 8*(dst & 7)
202 mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7)
225 mov t4=ip
228 adds t4=.memcpy_loops-1b,t4
243 add t4=t0,t4
255 mov b6=t4
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/prog_tests/
tracing_struct.c 46 ASSERT_EQ(skel->bss->t4_a_a, 10, "t4:a.a"); in test_fentry()
47 ASSERT_EQ(skel->bss->t4_b, 1, "t4:b"); in test_fentry()
48 ASSERT_EQ(skel->bss->t4_c, 2, "t4:c"); in test_fentry()
49 ASSERT_EQ(skel->bss->t4_d, 3, "t4:d"); in test_fentry()
50 ASSERT_EQ(skel->bss->t4_e_a, 2, "t4:e.a"); in test_fentry()
51 ASSERT_EQ(skel->bss->t4_e_b, 3, "t4:e.b"); in test_fentry()
52 ASSERT_EQ(skel->bss->t4_ret, 21, "t4 ret"); in test_fentry()
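Unlike the other hits, these t4_* names are just globals in a BPF selftest: the userspace side asserts that an fentry program saw the expected struct-argument values. A sketch of the BPF-side pattern being tested, with a hypothetical attach point and struct (not the selftest's actual ones):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct pair { int a; int b; };     /* illustrative */

    int t4_a_a, t4_b;                  /* read back via skel->bss in the test */

    SEC("fentry/some_traced_func")     /* hypothetical target function */
    int BPF_PROG(read_args, struct pair *p, int b)
    {
        bpf_probe_read_kernel(&t4_a_a, sizeof(t4_a_a), &p->a);
        t4_b = b;
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
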
