
Searched refs: t4 (Results 1 – 25 of 36), sorted by relevance


/arch/alpha/lib/
ev67-strrchr.S
36 insbl a1, 1, t4 # U : 000000000000ch00
41 or t2, t4, a1 # E : 000000000000chch
48 sll a1, 48, t4 # U : chch000000000000
50 or t4, a1, a1 # E : chch00000000chch
56 mskqh t5, a0, t4 # E : Complete garbage mask
58 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
61 andnot t1, t4, t1 # E : clear garbage from null test
62 andnot t3, t4, t3 # E : clear garbage from char test
84 negq t1, t4 # E : isolate first null byte match
85 and t1, t4, t4 # E :
[all …]
strrchr.S
31 lda t4, -1 # .. e1 : build garbage mask
34 mskqh t4, a0, t4 # e0 :
37 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
39 andnot t1, t4, t1 # .. e1 : clear garbage from null test
40 andnot t3, t4, t3 # e0 : clear garbage from char test
56 negq t1, t4 # e0 : isolate first null byte match
57 and t1, t4, t4 # e1 :
58 subq t4, 1, t5 # e0 : build a mask of the bytes up to...
59 or t4, t5, t4 # e1 : ... and including the null
61 and t3, t4, t3 # e0 : mask out char matches after null
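
The strrchr.S matches above (56-61) are a classic word-at-a-time trick. A minimal C sketch, assuming cmpbge-style masks in which bit i is set iff byte i matched, and that a null byte is known to be present:

    #include <stdint.h>

    /* t1 = null-byte mask, t3 = char-match mask (cmpbge-style) */
    static uint64_t matches_up_to_null(uint64_t t1, uint64_t t3)
    {
        uint64_t t4 = t1 & -t1;   /* negq + and: isolate first null byte match */
        uint64_t t5 = t4 - 1;     /* subq: build a mask of the bytes up to... */
        t4 |= t5;                 /* or: ... and including the null */
        return t3 & t4;           /* and: mask out char matches after null */
    }
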
strchr.S
27 lda t4, -1 # .. e1 : build garbage mask
30 mskqh t4, a0, t4 # e0 :
33 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
38 andnot t0, t4, t0 # e0 : clear garbage bits
58 and t0, 0xaa, t4 # e0 :
61 cmovne t4, 1, t4 # .. e1 :
63 addq v0, t4, v0 # .. e1 :
ev67-strchr.S
40 lda t4, -1 # E : build garbage mask
42 mskqh t4, a0, t4 # U : only want relevant part of first quad
50 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
58 andnot t0, t4, t0 # E : clear garbage bits
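
Both strchr variants open with the same "garbage mask" idiom (lda/mskqh/cmpbge above): the first quadword is loaded from an aligned address at or below the string start, so the leading bytes must be excluded from the byte tests. A sketch of that mask in C, assuming Alpha's little-endian byte numbering:

    #include <stdint.h>

    /* bits 0..(addr & 7)-1 mark the garbage bytes of the first aligned load */
    static uint64_t garbage_mask(uintptr_t addr)
    {
        return ((uint64_t)1 << (addr & 7)) - 1;
    }

    /* usage: t0 &= ~garbage_mask(a0);   andnot: clear garbage bits */
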
stxcpy.S
136 extqh t2, a1, t4 # e0 :
138 or t1, t4, t1 # .. e1 :
238 and a0, 7, t4 # .. e1 : find dest misalignment
246 beq t4, 1f # .. e1 :
251 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
256 cmplt t4, t5, t12 # e0 :
stxncpy.S
161 extqh t2, a1, t4 # e0 :
162 or t1, t4, t1 # e1 : first aligned src word complete
295 and a0, 7, t4 # .. e1 : find dest misalignment
303 beq t4, 1f # .. e1 :
307 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
312 1: cmplt t4, t5, t12 # e1 :
333 mskqh t1, t4, t1 # e0 :
ev6-stxcpy.S
158 extqh t2, a1, t4 # U : (stall on a1)
161 or t1, t4, t1 # E :
268 and a0, 7, t4 # E : find dest misalignment
275 beq t4, 1f # U :
284 subq a1, t4, a1 # E : sub dest misalignment from src addr
287 cmplt t4, t5, t12 # E :
ev6-stxncpy.S
200 extqh t2, a1, t4 # U : (3 cycle stall on t2)
202 or t1, t4, t1 # E : first aligned src word complete (stall)
340 and a0, 7, t4 # E : find dest misalignment
347 beq t4, 1f # U :
354 subq a1, t4, a1 # E : sub dest misalignment from src addr
359 1: cmplt t4, t5, t12 # E :
381 mskqh t1, t4, t1 # U :
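
All four stxcpy/stxncpy variants share the set-up visible in these matches: compute the destination's misalignment, then back the source pointer up by the same amount so the main loop can issue aligned stores. A rough C rendering of just that pointer arithmetic (the stray bytes it pulls in are masked off later, as in the asm):

    #include <stdint.h>

    static void align_for_dst_stores(const char **src, char **dst)
    {
        uintptr_t mis = (uintptr_t)*dst & 7;  /* and a0, 7, t4 : find dest misalignment */
        if (mis)                              /* beq t4, 1f */
            *src -= mis;                      /* subq a1, t4, a1 */
    }
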
/arch/ia64/lib/
memcpy.S
36 # define t4 r22 macro
190 sub t4=r0,dst // t4 = -dst
194 shl t4=t4,3 // t4 = 8*(dst & 7)
202 mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7)
225 mov t4=ip
228 adds t4=.memcpy_loops-1b,t4
243 add t4=t0,t4
255 mov b6=t4
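
The memcpy.S matches show an ip-relative jump table: the code reads the current ip, adds the offset of .memcpy_loops, indexes the table by the shifted alignment value in t0, and branches through b6. A function-pointer table is the portable analogue; everything named below is illustrative, not the kernel's:

    #include <string.h>

    typedef void (*copy_loop_t)(void *dst, const void *src, unsigned long len);

    static void copy_plain(void *d, const void *s, unsigned long n) { memcpy(d, s, n); }

    /* one entry per loop variant, as in .memcpy_loops */
    static copy_loop_t memcpy_loops[] = { copy_plain /* , shifted variants ... */ };

    static void dispatch(unsigned long t0, void *d, const void *s, unsigned long n)
    {
        memcpy_loops[t0](d, s, n);   /* add t4=t0,t4 ; mov b6=t4 ; branch */
    }
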
copy_page.S
44 .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH], t3[PIPE_DEPTH], t4[PIPE_DEPTH], \
79 (p[0]) ld8 t4[0]=[src2],16
80 (EPI) st8 [tgt2]=t4[PIPE_DEPTH-1],16
copy_page_mck.S
80 #define t4 r20 macro
85 #define t10 t4 // alias!
144 (p[D]) ld8 t4 = [src1], 3*8 // M1
156 (p[D]) st8 [dst1] = t4, 3*8
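
copy_page.S rotates t1..t4 through PIPE_DEPTH register sets (.rotr), so each ld8 runs PIPE_DEPTH iterations ahead of the st8 that consumes it. A C sketch of that software pipeline, modeling the rotation as a ring buffer (PIPE_DEPTH value illustrative):

    #include <stdint.h>

    #define PIPE_DEPTH 4

    static void copy_pipelined(uint64_t *dst, const uint64_t *src, unsigned long words)
    {
        uint64_t pipe[PIPE_DEPTH];
        unsigned long i, j;

        for (i = 0; i < PIPE_DEPTH && i < words; i++)
            pipe[i] = src[i];                            /* prologue: fill the pipe */
        for (; i < words; i++) {
            dst[i - PIPE_DEPTH] = pipe[i % PIPE_DEPTH];  /* (EPI) st8: drain oldest stage */
            pipe[i % PIPE_DEPTH] = src[i];               /* (p[0]) ld8: refill it */
        }
        for (j = i >= PIPE_DEPTH ? i - PIPE_DEPTH : 0; j < words; j++)
            dst[j] = pipe[j % PIPE_DEPTH];               /* epilogue: drain the rest */
    }
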
/arch/arm/crypto/
sha256-armv4.pl
44 $inp="r1"; $t4="r1";
71 str $inp,[sp,#17*4] @ make room for $t4
88 str $inp,[sp,#17*4] @ make room for $t4
120 ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx
137 @ ldr $t4,[sp,#`($i+14)%16`*4]
140 mov $t2,$t4,ror#$sigma1[0]
142 eor $t2,$t2,$t4,ror#$sigma1[1]
145 eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14])
146 ldr $t4,[sp,#`($i+9)%16`*4]
152 add $t1,$t1,$t4 @ X[i]
[all …]
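
In the generated ARM code, $sigma1[0..2] are SHA-256's message-schedule constants (17, 19, 10): two rotates and a shift XORed together, then added into the rolling 16-word window. The same step in C:

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, unsigned r) { return (x >> r) | (x << (32 - r)); }

    /* the mov/eor/eor sequence above */
    static uint32_t sigma1(uint32_t x)
    {
        return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
    }

    /* schedule update on the 16-word window X, as in BODY_16_xx:
     * X[i%16] += sigma1(X[(i+14)%16]) + X[(i+9)%16] + sigma0(X[(i+1)%16]) */
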
aes-cipher-core.S
45 .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op, oldcpsr
68 __select \t4, \in0, 3
71 __select \t4, \in2, 3
75 __load \t4, \t4, 3, \sz, \op
90 eor \out1, \out1, \t4, ror #8
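
Each strand of __hround has the shape shown: __select pulls one byte out of an input word (which word depends on enc), __load looks it up in the round table, and the final eor folds the result in rotated by 8. A sketch of one strand in C, with a stand-in table:

    #include <stdint.h>

    static const uint32_t T[256] = { 0 };  /* stand-in for the real round table */

    static uint32_t ror32(uint32_t x, unsigned r) { return (x >> r) | (x << (32 - r)); }

    static uint32_t hround_strand(uint32_t out1, uint32_t in)
    {
        uint32_t t4 = (in >> 24) & 0xff;   /* __select \t4, \in, 3 */
        return out1 ^ ror32(T[t4], 8);     /* __load ... ; eor \out1, \out1, \t4, ror #8 */
    }
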
aes-neonbs-core.S
305 t0, t1, t2, t3, t4, t5, t6, t7, inv
313 vext.8 \t4, \x4, \x4, #12
316 veor \x4, \x4, \t4
333 veor \t4, \t4, \x3
339 veor \t4, \t4, \x7
345 veor \x2, \t0, \t4
354 veor \x3, \t0, \t4
361 t0, t1, t2, t3, t4, t5, t6, t7
366 vld1.8 {\t4-\t5}, [bskey, :256]!
371 veor \x4, \x4, \t4
[all …]
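
vext.8 with both source operands equal is a byte rotation: vext.8 \t4, \x4, \x4, #12 yields byte i = x4[(i+12) mod 16], and the veor chains combine such rotated copies. The rotation in C:

    #include <stdint.h>

    /* vext.8 \t4, \x4, \x4, #12 */
    static void vext_12(uint8_t t4[16], const uint8_t x4[16])
    {
        for (int i = 0; i < 16; i++)
            t4[i] = x4[(i + 12) & 15];
    }
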
/arch/x86/crypto/
glue_helper-asm-avx2.S
61 t1x, t2, t2x, t3, t3x, t4, t5) \ argument
64 vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */\
75 add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \
77 add2_le128(t2, t0, t4, t3, t5); \
79 add2_le128(t2, t0, t4, t3, t5); \
81 add2_le128(t2, t0, t4, t3, t5); \
83 add2_le128(t2, t0, t4, t3, t5); \
85 add2_le128(t2, t0, t4, t3, t5); \
87 add2_le128(t2, t0, t4, t3, t5); \
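
add2_le128 bumps a 128-bit little-endian block counter by 2 (once per lane pair; the extra t registers carry the overflow handling). For a single counter in C:

    #include <stdint.h>

    struct le128 { uint64_t lo, hi; };

    static void add2_le128(struct le128 *ctr)
    {
        uint64_t old = ctr->lo;
        ctr->lo += 2;
        if (ctr->lo < old)   /* carry out of the low quadword */
            ctr->hi++;
    }
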
camellia-aesni-avx2-asm_64.S
68 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
73 vbroadcasti128 .Linv_shift_row, t4; \
81 vpshufb t4, x0, x0; \
82 vpshufb t4, x7, x7; \
83 vpshufb t4, x3, x3; \
84 vpshufb t4, x6, x6; \
85 vpshufb t4, x2, x2; \
86 vpshufb t4, x5, x5; \
87 vpshufb t4, x1, x1; \
88 vpshufb t4, x4, x4; \
[all …]
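
vbroadcasti128 replicates the 16-byte .Linv_shift_row mask into both halves of t4, and each vpshufb then applies it as a per-lane byte permutation: out[i] = in[mask[i] & 15], or 0 when the mask byte's top bit is set. In C, for one 16-byte lane:

    #include <stdint.h>

    static void pshufb(uint8_t out[16], const uint8_t in[16], const uint8_t mask[16])
    {
        for (int i = 0; i < 16; i++)
            out[i] = (mask[i] & 0x80) ? 0 : in[mask[i] & 0x0f];
    }
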
poly1305-sse2-x86_64.S
45 #define t4 %xmm6 macro
137 movd s3,t4
138 punpcklqdq t4,t3
144 movd s2,t4
145 punpcklqdq t4,t3
149 movdqa t1,t4
150 punpcklqdq t2,t4
152 paddq t4,t1
176 movd r0,t4
177 punpcklqdq t4,t3
[all …]
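
The movd/punpcklqdq pairs are packing two scalar limbs into the two 64-bit lanes of an xmm register ahead of the paddq adds. A sketch of the same packing with SSE2 intrinsics:

    #include <emmintrin.h>
    #include <stdint.h>

    /* movd s,t4 ; punpcklqdq t4,t3  =>  t3 = { t3.lo, s } */
    static __m128i pack_lanes(uint64_t lo, uint64_t hi)
    {
        return _mm_set_epi64x((long long)hi, (long long)lo);
    }
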
camellia-aesni-avx-asm_64.S
51 #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
56 vmovdqa .Linv_shift_row, t4; \
62 vpshufb t4, x0, x0; \
63 vpshufb t4, x7, x7; \
64 vpshufb t4, x1, x1; \
65 vpshufb t4, x4, x4; \
66 vpshufb t4, x2, x2; \
67 vpshufb t4, x5, x5; \
68 vpshufb t4, x3, x3; \
69 vpshufb t4, x6, x6; \
[all …]
/arch/sparc/lib/
blockops.S
28 #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
31 ldd [src + offset + 0x08], t4; \
35 std t4, [dst + offset + 0x08]; \
checksum_32.S
20 #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ argument
25 ldd [buf + offset + 0x10], t4; \
29 addxcc t4, sum, sum; \
194 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
198 ldd [src + off + 0x10], t4; \
205 addxcc t4, sum, sum; \
206 std t4, [dst + off + 0x10]; \
216 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
219 ldd [src + off + 0x10], t4; \
229 st t4, [dst + off + 0x10]; \
[all …]
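
CSUM_BIGCHUNK's ldd/addxcc pairs accumulate an Internet-checksum style sum in which each addition's carry is fed into the next add (that is what the xcc chaining does). The equivalent carry fold in C:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t csum_chunk(const uint32_t *buf, size_t words, uint32_t sum)
    {
        for (size_t i = 0; i < words; i++) {
            sum += buf[i];
            sum += (sum < buf[i]);   /* addxcc: fold the carry back in */
        }
        return sum;
    }
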
copy_user.S
69 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
72 ldd [%src + (offset) + 0x10], %t4; \
78 st %t4, [%dst + (offset) + 0x10]; \
83 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
86 ldd [%src + (offset) + 0x10], %t4; \
90 std %t4, [%dst + (offset) + 0x10]; \
/arch/mips/lib/
csum_partial.S
33 #define t4 $12 macro
182 CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
193 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
194 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
195 CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
196 CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
208 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
209 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
217 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
503 LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
[all …]
/arch/arm64/crypto/
ghash-ce-core.S
28 t4 .req v11
90 pmull\t t4.8h, \ad, \b1\().\nb // E = A*B1
98 eor t3.16b, t3.16b, t4.16b // L = E + F
102 uzp1 t4.2d, t3.2d, t5.2d
109 eor t4.16b, t4.16b, t3.16b
117 eor t4.16b, t4.16b, t3.16b
120 zip2 t5.2d, t4.2d, t3.2d
121 zip1 t3.2d, t4.2d, t3.2d
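
The pmull lines compute 64x64 -> 128-bit carryless products; the surrounding eor/uzp/zip traffic is the Karatsuba recombination and reduction. The primitive itself, modeled bit by bit in C:

    #include <stdint.h>

    /* 64x64 -> 128-bit carryless multiply, the scalar model of pmull */
    static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
    {
        uint64_t l = 0, h = 0;
        for (int i = 0; i < 64; i++)
            if ((a >> i) & 1) {
                l ^= b << i;
                if (i)
                    h ^= b >> (64 - i);
            }
        *lo = l;
        *hi = h;
    }
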
/arch/alpha/include/uapi/asm/
regdef.h
11 #define t4 $5 macro
/arch/mips/include/asm/
regdef.h
34 #define t4 $12 macro
