Searched refs: t4 (Results 1 – 25 of 30) sorted by relevance

/arch/alpha/lib/
ev67-strrchr.S
35 insbl a1, 1, t4 # U : 000000000000ch00
40 or t2, t4, a1 # E : 000000000000chch
47 sll a1, 48, t4 # U : chch000000000000
49 or t4, a1, a1 # E : chch00000000chch
55 mskqh t5, a0, t4 # E : Complete garbage mask
57 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
60 andnot t1, t4, t1 # E : clear garbage from null test
61 andnot t3, t4, t3 # E : clear garbage from char test
83 negq t1, t4 # E : isolate first null byte match
84 and t1, t4, t4 # E :
[all …]
strrchr.S
30 lda t4, -1 # .. e1 : build garbage mask
33 mskqh t4, a0, t4 # e0 :
36 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
38 andnot t1, t4, t1 # .. e1 : clear garbage from null test
39 andnot t3, t4, t3 # e0 : clear garbage from char test
55 negq t1, t4 # e0 : isolate first null byte match
56 and t1, t4, t4 # e1 :
57 subq t4, 1, t5 # e0 : build a mask of the bytes up to...
58 or t4, t5, t4 # e1 : ... and including the null
60 and t3, t4, t3 # e0 : mask out char matches after null
strchr.S
26 lda t4, -1 # .. e1 : build garbage mask
29 mskqh t4, a0, t4 # e0 :
32 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
37 andnot t0, t4, t0 # e0 : clear garbage bits
57 and t0, 0xaa, t4 # e0 :
60 cmovne t4, 1, t4 # .. e1 :
62 addq v0, t4, v0 # .. e1 :
ev67-strchr.S
39 lda t4, -1 # E : build garbage mask
41 mskqh t4, a0, t4 # U : only want relevant part of first quad
49 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
57 andnot t0, t4, t0 # E : clear garbage bits
stxcpy.S
135 extqh t2, a1, t4 # e0 :
137 or t1, t4, t1 # .. e1 :
237 and a0, 7, t4 # .. e1 : find dest misalignment
245 beq t4, 1f # .. e1 :
250 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
255 cmplt t4, t5, t12 # e0 :
stxncpy.S
160 extqh t2, a1, t4 # e0 :
161 or t1, t4, t1 # e1 : first aligned src word complete
294 and a0, 7, t4 # .. e1 : find dest misalignment
302 beq t4, 1f # .. e1 :
306 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
311 1: cmplt t4, t5, t12 # e1 :
332 mskqh t1, t4, t1 # e0 :
ev6-stxcpy.S
157 extqh t2, a1, t4 # U : (stall on a1)
160 or t1, t4, t1 # E :
267 and a0, 7, t4 # E : find dest misalignment
274 beq t4, 1f # U :
283 subq a1, t4, a1 # E : sub dest misalignment from src addr
286 cmplt t4, t5, t12 # E :
ev6-stxncpy.S
199 extqh t2, a1, t4 # U : (3 cycle stall on t2)
201 or t1, t4, t1 # E : first aligned src word complete (stall)
339 and a0, 7, t4 # E : find dest misalignment
346 beq t4, 1f # U :
353 subq a1, t4, a1 # E : sub dest misalignment from src addr
358 1: cmplt t4, t5, t12 # E :
380 mskqh t1, t4, t1 # U :
/arch/ia64/lib/
memcpy.S
34 # define t4 r22 macro
188 sub t4=r0,dst // t4 = -dst
192 shl t4=t4,3 // t4 = 8*(dst & 7)
200 mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7)
223 mov t4=ip
226 adds t4=.memcpy_loops-1b,t4
241 add t4=t0,t4
253 mov b6=t4
copy_page.S
42 .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH], t3[PIPE_DEPTH], t4[PIPE_DEPTH], \
77 (p[0]) ld8 t4[0]=[src2],16
78 (EPI) st8 [tgt2]=t4[PIPE_DEPTH-1],16
copy_page_mck.S
78 #define t4 r20 macro
83 #define t10 t4 // alias!
142 (p[D]) ld8 t4 = [src1], 3*8 // M1
154 (p[D]) st8 [dst1] = t4, 3*8
memcpy_mck.S
46 #define t4 r26 macro
52 #define t10 t4 // alias!
221 EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8) // M1
233 EK(.ex_handler, (p[D]) st8 [dst1] = t4, 3*8)
423 EK(.ex_handler_short, (p14) ld1 t4=[src1],2)
432 EK(.ex_handler_short, (p14) st1 [dst1]=t4,2)
479 EX(.ex_handler_short, (p9) ld1 t4=[src1],2)
487 EX(.ex_handler_short, (p9) st1 [dst1] = t4,2)
/arch/arm/crypto/
sha256-armv4.pl
44 $inp="r1"; $t4="r1";
71 str $inp,[sp,#17*4] @ make room for $t4
88 str $inp,[sp,#17*4] @ make room for $t4
120 ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx
137 @ ldr $t4,[sp,#`($i+14)%16`*4]
140 mov $t2,$t4,ror#$sigma1[0]
142 eor $t2,$t2,$t4,ror#$sigma1[1]
145 eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14])
146 ldr $t4,[sp,#`($i+9)%16`*4]
152 add $t1,$t1,$t4 @ X[i]
[all …]
/arch/sparc/lib/
memcpy.S
17 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
20 ldd [%src + (offset) + 0x10], %t4; \
26 st %t4, [%dst + (offset) + 0x10]; \
31 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
34 ldd [%src + (offset) + 0x10], %t4; \
38 std %t4, [%dst + (offset) + 0x10]; \
62 #define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
65 ldd [%src - (offset) - 0x10], %t4; \
71 st %t4, [%dst - (offset) - 0x10]; \
76 #define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
[all …]
blockops.S
26 #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
29 ldd [src + offset + 0x08], t4; \
33 std t4, [dst + offset + 0x08]; \
checksum_32.S
18 #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ argument
23 ldd [buf + offset + 0x10], t4; \
27 addxcc t4, sum, sum; \
191 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
195 ldd [src + off + 0x10], t4; \
202 addxcc t4, sum, sum; \
203 std t4, [dst + off + 0x10]; \
213 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
216 ldd [src + off + 0x10], t4; \
226 st t4, [dst + off + 0x10]; \
[all …]
copy_user.S
67 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
70 ldd [%src + (offset) + 0x10], %t4; \
76 st %t4, [%dst + (offset) + 0x10]; \
81 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
84 ldd [%src + (offset) + 0x10], %t4; \
88 std %t4, [%dst + (offset) + 0x10]; \
/arch/x86/crypto/
glue_helper-asm-avx2.S
61 t1x, t2, t2x, t3, t3x, t4, t5) \ argument
64 vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */\
75 add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \
77 add2_le128(t2, t0, t4, t3, t5); \
79 add2_le128(t2, t0, t4, t3, t5); \
81 add2_le128(t2, t0, t4, t3, t5); \
83 add2_le128(t2, t0, t4, t3, t5); \
85 add2_le128(t2, t0, t4, t3, t5); \
87 add2_le128(t2, t0, t4, t3, t5); \
camellia-aesni-avx2-asm_64.S
67 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
72 vbroadcasti128 .Linv_shift_row, t4; \
80 vpshufb t4, x0, x0; \
81 vpshufb t4, x7, x7; \
82 vpshufb t4, x3, x3; \
83 vpshufb t4, x6, x6; \
84 vpshufb t4, x2, x2; \
85 vpshufb t4, x5, x5; \
86 vpshufb t4, x1, x1; \
87 vpshufb t4, x4, x4; \
[all …]
poly1305-sse2-x86_64.S
43 #define t4 %xmm6 macro
135 movd s3,t4
136 punpcklqdq t4,t3
142 movd s2,t4
143 punpcklqdq t4,t3
147 movdqa t1,t4
148 punpcklqdq t2,t4
150 paddq t4,t1
174 movd r0,t4
175 punpcklqdq t4,t3
[all …]
camellia-aesni-avx-asm_64.S
50 #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
55 vmovdqa .Linv_shift_row, t4; \
61 vpshufb t4, x0, x0; \
62 vpshufb t4, x7, x7; \
63 vpshufb t4, x1, x1; \
64 vpshufb t4, x4, x4; \
65 vpshufb t4, x2, x2; \
66 vpshufb t4, x5, x5; \
67 vpshufb t4, x3, x3; \
68 vpshufb t4, x6, x6; \
[all …]
/arch/mips/lib/
csum_partial.S
32 #define t4 $12 macro
180 CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
191 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
192 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
193 CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
194 CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
206 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
207 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
215 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
500 LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
[all …]
/arch/alpha/include/uapi/asm/
regdef.h
10 #define t4 $5 macro
/arch/mips/include/asm/
regdef.h
34 #define t4 $12 macro
/arch/arm64/include/asm/
assembler.h
356 .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
358 ldp \t3, \t4, [\src, #16]
363 stnp \t3, \t4, [\dest, #16]
