
Searched refs:t4 (Results 1 – 25 of 26) sorted by relevance

/arch/alpha/lib/
ev67-strrchr.S
35 insbl a1, 1, t4 # U : 000000000000ch00
40 or t2, t4, a1 # E : 000000000000chch
47 sll a1, 48, t4 # U : chch000000000000
49 or t4, a1, a1 # E : chch00000000chch
55 mskqh t5, a0, t4 # E : Complete garbage mask
57 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
60 andnot t1, t4, t1 # E : clear garbage from null test
61 andnot t3, t4, t3 # E : clear garbage from char test
83 negq t1, t4 # E : isolate first null byte match
84 and t1, t4, t4 # E :
[all …]
strrchr.S
30 lda t4, -1 # .. e1 : build garbage mask
33 mskqh t4, a0, t4 # e0 :
36 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
38 andnot t1, t4, t1 # .. e1 : clear garbage from null test
39 andnot t3, t4, t3 # e0 : clear garbage from char test
55 negq t1, t4 # e0 : isolate first null byte match
56 and t1, t4, t4 # e1 :
57 subq t4, 1, t5 # e0 : build a mask of the bytes upto...
58 or t4, t5, t4 # e1 : ... and including the null
60 and t3, t4, t3 # e0 : mask out char matches after null
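
Aside: the negq/and pair followed by subq/or above is the classic two's-complement trick for isolating the lowest set bit and extending it into a mask. A minimal C sketch of the same logic, assuming a cmpbge-style per-byte match mask (the helper name is illustrative, not from the kernel source):

    #include <stdint.h>

    /* Given a mask with one bit per byte position that held '\0'
     * (as Alpha's cmpbge produces), keep only the first null and
     * build a mask covering every byte up to and including it. */
    static uint64_t mask_through_first_null(uint64_t null_matches)
    {
        uint64_t first = null_matches & -null_matches; /* negq + and  */
        return first | (first - 1);                    /* subq 1 + or */
    }

strrchr then ANDs the char-match mask with this result so matches after the terminating null are discarded.
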
strchr.S
26 lda t4, -1 # .. e1 : build garbage mask
29 mskqh t4, a0, t4 # e0 :
32 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
37 andnot t0, t4, t0 # e0 : clear garbage bits
57 and t0, 0xaa, t4 # e0 :
60 cmovne t4, 1, t4 # .. e1 :
62 addq v0, t4, v0 # .. e1 :
ev67-strchr.S
39 lda t4, -1 # E : build garbage mask
41 mskqh t4, a0, t4 # U : only want relevant part of first quad
49 cmpbge zero, t4, t4 # E : bits set iff byte is garbage
57 andnot t0, t4, t0 # E : clear garbage bits
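
Aside: the "garbage mask" built by lda/mskqh/cmpbge in these routines handles the first aligned 8-byte load, which may cover bytes below the real string start. A hedged C sketch of the idea, assuming little-endian layout; the helper name is hypothetical:

    #include <stdint.h>

    /* One mask bit per byte of the first aligned load that lies
     * before the string actually starts; matches in those byte
     * lanes are cleared with andnot in the assembly. */
    static unsigned leading_garbage_mask(const char *s)
    {
        unsigned misalign = (uintptr_t)s & 7; /* bytes before s */
        return (1u << misalign) - 1;
    }
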
ev6-strncpy_from_user.S
58 xor a0, a1, t4 # E :
65 and t4, 7, t4 # E : misalignment between the two
69 bne t4, $unaligned # U :
180 extqh t2, a1, t4 # U :
182 or t1, t4, t1 # E : first aligned src word complete
323 and a0, 7, t4 # E : find dest misalignment
331 beq t4, 1f # U :
342 subq a1, t4, a1 # E : sub dest misalignment from src addr
345 cmplt t4, t5, t12 # E :
371 mskqh t1, t4, t1 # U :
[all …]
strlen_user.S
74 and t1, 0xaa, t4
77 cmovne t4, 1, t4
79 addq v0, t4, v0
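
Aside: the and 0xaa / cmovne / addq sequence is one step of a branch-free log2: with exactly one bit set in an 8-bit lane mask, testing against 0xaa, 0xcc and 0xf0 yields the bit index. A C sketch under that single-bit precondition:

    /* m must have exactly one of bits 0..7 set. */
    static unsigned bit_index8(unsigned m)
    {
        unsigned idx = 0;
        idx += (m & 0xaa) ? 1 : 0; /* the and + cmovne step shown above */
        idx += (m & 0xcc) ? 2 : 0;
        idx += (m & 0xf0) ? 4 : 0;
        return idx;
    }
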
strncpy_from_user.S
146 extqh t2, a1, t4 # e0 :
147 or t1, t4, t1 # e1 : first aligned src word complete
264 and a0, 7, t4 # .. e1 : find dest misalignment
272 beq t4, 1f # .. e1 :
277 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
282 cmplt t4, t5, t12 # e1 :
303 mskqh t1, t4, t1 # e0 :
316 zapnot t0, t12, t4 # was last byte written null?
317 cmovne t4, 1, t4
329 addq t0, t4, t0 # add one if we filled the buffer
stxcpy.S
135 extqh t2, a1, t4 # e0 :
137 or t1, t4, t1 # .. e1 :
237 and a0, 7, t4 # .. e1 : find dest misalignment
245 beq t4, 1f # .. e1 :
250 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
255 cmplt t4, t5, t12 # e0 :
stxncpy.S
160 extqh t2, a1, t4 # e0 :
161 or t1, t4, t1 # e1 : first aligned src word complete
294 and a0, 7, t4 # .. e1 : find dest misalignment
302 beq t4, 1f # .. e1 :
306 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
311 1: cmplt t4, t5, t12 # e1 :
332 mskqh t1, t4, t1 # e0 :
ev6-stxcpy.S
157 extqh t2, a1, t4 # U : (stall on a1)
160 or t1, t4, t1 # E :
267 and a0, 7, t4 # E : find dest misalignment
274 beq t4, 1f # U :
283 subq a1, t4, a1 # E : sub dest misalignment from src addr
286 cmplt t4, t5, t12 # E :
ev6-stxncpy.S
199 extqh t2, a1, t4 # U : (3 cycle stall on t2)
201 or t1, t4, t1 # E : first aligned src word complete (stall)
339 and a0, 7, t4 # E : find dest misalignment
346 beq t4, 1f # U :
353 subq a1, t4, a1 # E : sub dest misalignment from src addr
358 1: cmplt t4, t5, t12 # E :
380 mskqh t1, t4, t1 # U :
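
Aside: the recurring extqh t2, a1, t4 / or t1, t4, t1 pairs in the stxcpy family splice one aligned destination word out of two aligned loads from a misaligned source. A rough C equivalent, assuming little-endian order and a misalignment of 1..7 bytes:

    #include <stdint.h>

    /* lo and hi are two consecutive aligned 8-byte loads; shift
     * and OR them so the result starts at the source's true offset.
     * misalign must be 1..7 (shift of 0 or 64 would be undefined). */
    static uint64_t merge_unaligned(uint64_t lo, uint64_t hi, unsigned misalign)
    {
        unsigned shift = misalign * 8;
        return (lo >> shift) | (hi << (64 - shift));
    }
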
/arch/ia64/lib/
memcpy.S
34 # define t4 r22 macro
188 sub t4=r0,dst // t4 = -dst
192 shl t4=t4,3 // t4 = 8*(dst & 7)
200 mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7)
223 mov t4=ip
226 adds t4=.memcpy_loops-1b,t4
241 add t4=t0,t4
253 mov b6=t4
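
Aside: the mov t4=ip / adds t4=.memcpy_loops-1b,t4 / add t4=t0,t4 / mov b6=t4 sequence above computes an indirect branch target by hand: current instruction pointer, plus the offset of a block of specialized copy loops, plus a scaled case index. In C the same dispatch is a function-pointer table; all names here are hypothetical:

    #include <stddef.h>

    typedef void (*copy_loop_fn)(void *dst, const void *src, size_t len);

    /* table[idx] plays the role of .memcpy_loops + scaled index;
     * the call corresponds to loading b6 and branching through it. */
    static void dispatch_copy(const copy_loop_fn *table, unsigned idx,
                              void *dst, const void *src, size_t len)
    {
        table[idx](dst, src, len);
    }
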
copy_page.S
42 .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH], t3[PIPE_DEPTH], t4[PIPE_DEPTH], \
77 (p[0]) ld8 t4[0]=[src2],16
78 (EPI) st8 [tgt2]=t4[PIPE_DEPTH-1],16
copy_page_mck.S
78 #define t4 r20 macro
83 #define t10 t4 // alias!
142 (p[D]) ld8 t4 = [src1], 3*8 // M1
154 (p[D]) st8 [dst1] = t4, 3*8
memcpy_mck.S
46 #define t4 r26 macro
52 #define t10 t4 // alias!
221 EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8) // M1
233 EK(.ex_handler, (p[D]) st8 [dst1] = t4, 3*8)
423 EK(.ex_handler_short, (p14) ld1 t4=[src1],2)
432 EK(.ex_handler_short, (p14) st1 [dst1]=t4,2)
479 EX(.ex_handler_short, (p9) ld1 t4=[src1],2)
487 EX(.ex_handler_short, (p9) st1 [dst1] = t4,2)
/arch/mips/lib/
csum_partial.S
31 #define t4 $12 macro
173 CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
184 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
185 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
186 CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
187 CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
199 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
200 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
208 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
443 EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy)
[all …]
memcpy-inatomic.S
132 #define t4 $12 macro
236 EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy)
246 STORE t4, UNIT(-4)(dst)
memcpy.S
132 #define t4 $12 macro
241 EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy)
251 EXC( STORE t4, UNIT(-4)(dst), .Ls_exc_p4u)
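
Aside: the LOAD t4, UNIT(4)(src) / STORE t4, UNIT(-4)(dst) pattern is one batch of an unrolled word copy: several loads issue before any store, so cache-miss latency overlaps. A simplified C sketch of a single batch; word size and count are illustrative:

    #include <stdint.h>

    static void copy_batch(uint32_t *dst, const uint32_t *src)
    {
        /* all loads first, then all stores, as in the unrolled loop */
        uint32_t t0 = src[0], t1 = src[1], t2 = src[2],
                 t3 = src[3], t4 = src[4];
        dst[0] = t0; dst[1] = t1; dst[2] = t2;
        dst[3] = t3; dst[4] = t4;
    }

The EXC(...) wrapper in the source additionally registers a fixup label for each access so faults on user memory unwind cleanly; that part has no portable C equivalent.
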
/arch/sparc/lib/
blockops.S
25 #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
28 ldd [src + offset + 0x08], t4; \
32 std t4, [dst + offset + 0x08]; \
memcpy.S
45 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
48 ldd [%src + (offset) + 0x10], %t4; \
54 st %t4, [%dst + (offset) + 0x10]; \
59 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
62 ldd [%src + (offset) + 0x10], %t4; \
66 std %t4, [%dst + (offset) + 0x10]; \
90 #define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
93 ldd [%src - (offset) - 0x10], %t4; \
99 st %t4, [%dst - (offset) - 0x10]; \
104 #define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
[all …]
checksum_32.S
18 #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ argument
23 ldd [buf + offset + 0x10], t4; \
27 addxcc t4, sum, sum; \
191 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
195 ldd [src + off + 0x10], t4; \
202 addxcc t4, sum, sum; \
203 std t4, [dst + off + 0x10]; \
213 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
216 ldd [src + off + 0x10], t4; \
226 st t4, [dst + off + 0x10]; \
[all …]
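
Aside: CSUM_BIGCHUNK strings ldd loads together with addxcc so the carry out of each 32-bit add feeds into the next. Portable C has no add-with-carry, so the usual sketch widens the accumulator to 64 bits and folds the carries back at the end:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t csum_words(const uint32_t *buf, size_t n, uint32_t sum)
    {
        uint64_t acc = sum;
        for (size_t i = 0; i < n; i++)
            acc += buf[i];
        while (acc >> 32)                /* fold carries back in */
            acc = (acc & 0xffffffffu) + (acc >> 32);
        return (uint32_t)acc;
    }
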
copy_user.S
67 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
70 ldd [%src + (offset) + 0x10], %t4; \
76 st %t4, [%dst + (offset) + 0x10]; \
81 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
84 ldd [%src + (offset) + 0x10], %t4; \
88 std %t4, [%dst + (offset) + 0x10]; \
/arch/alpha/include/asm/
regdef.h
10 #define t4 $5 macro
/arch/mips/include/asm/
regdef.h
32 #define t4 $12 macro
/arch/mips/kernel/
scall32-o32.S
129 addu t4, t0, 32
130 and t5, t4
279 lw t4, 20(sp)
282 sw t4, 16(sp)
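
Aside: in the MIPS o32 ABI only the first four syscall arguments travel in registers; arguments five and up sit on the caller's stack, which is why this path loads t4 from one stack slot and stores it into another frame. A schematic C version, with offsets illustrative rather than taken from the source:

    #include <stdint.h>

    /* copy syscall args 5..8 from the user stack frame into the
     * kernel stack frame for the C syscall handler */
    static void copy_stack_args(uint32_t *ksp, const uint32_t *usp)
    {
        for (int i = 4; i < 8; i++)
            ksp[i] = usp[i];
    }

The real code must fault-handle these loads, since the user stack pointer is untrusted memory.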
