Home
last modified time | relevance | path

Searched refs:t5 (Results 1 – 24 of 24) sorted by relevance

/arch/alpha/lib/
ev67-strrchr.S:36 insbl a1, 2, t5 # U : 0000000000ch0000
41 sll t5, 8, t3 # U : 00000000ch000000
45 or t5, t3, t3 # E : 00000000chch0000
52 lda t5, -1 # E : build garbage mask
55 mskqh t5, a0, t4 # E : Complete garbage mask
85 subq t4, 1, t5 # E : build a mask of the bytes upto...
86 or t4, t5, t4 # E : ... and including the null
101 lda t5, 0x3f($31) # E :
102 subq t5, t2, t5 # E : Normalize leading zero count
104 addq t6, t5, v0 # E : and add to quadword address
strrchr.S:23 sll a1, 8, t5 # e0 : replicate our test character
25 or t5, a1, a1 # e0 :
27 sll a1, 16, t5 # e0 :
29 or t5, a1, a1 # e0 :
31 sll a1, 32, t5 # e0 :
34 or t5, a1, a1 # .. e1 : character replication complete
57 subq t4, 1, t5 # e0 : build a mask of the bytes upto...
58 or t4, t5, t4 # e1 : ... and including the null
ev67-strchr.S:33 insbl a1, 1, t5 # U : 000000000000ch00
37 or t5, t3, a1 # E : 000000000000chch
43 inswl a1, 2, t5 # E : 00000000chch0000
47 or a3, t5, t5 # E : 0000chchchch0000
52 or t5, a1, a1 # E : chchchchchchchch
strchr.S:23 sll a1, 8, t5 # e0 : replicate the search character
25 or t5, a1, a1 # e0 :
27 sll a1, 16, t5 # e0 :
30 or t5, a1, a1 # .. e1 :
31 sll a1, 32, t5 # e0 :
33 or t5, a1, a1 # e0 :
stxcpy.S:238 and a1, 7, t5 # e0 : find src misalignment
255 cmplt t4, t5, t12 # e0 :
259 mskqh t2, t5, t2 # e0 :
274 and a1, 7, t5 # .. e1 :
277 srl t12, t5, t12 # e0 : adjust final null return value
ev6-stxcpy.S:268 and a1, 7, t5 # E : find src misalignment
286 cmplt t4, t5, t12 # E :
290 mskqh t2, t5, t2 # U :
303 and a1, 7, t5 # E :
307 srl t12, t5, t12 # U : adjust final null return value
strncpy_from_user.S:265 and a1, 7, t5 # e0 : find src misalignment
282 cmplt t4, t5, t12 # e1 :
287 mskqh t2, t5, t2 # e0 : begin src byte validity mask
290 or t8, t10, t5 # .. e1 : test for end-of-count too
292 cmoveq a2, t5, t8 # .. e1 :
ev6-strncpy_from_user.S:324 and a1, 7, t5 # E : find src misalignment
345 cmplt t4, t5, t12 # E :
350 mskqh t2, t5, t2 # U : begin src byte validity mask
355 or t8, t10, t5 # E : test for end-of-count too
357 cmoveq a2, t5, t8 # E : Latency=2, extra map slot
stxncpy.S:295 and a1, 7, t5 # e0 : find src misalignment
311 1: cmplt t4, t5, t12 # e1 :
319 or t8, t10, t5 # .. e1 : test for end-of-count too
321 cmoveq a2, t5, t8 # .. e1 :
ev6-stxncpy.S:340 and a1, 7, t5 # E : find src misalignment
358 1: cmplt t4, t5, t12 # E :
366 or t8, t10, t5 # E : test for end-of-count too
369 cmoveq a2, t5, t8 # E : Latency=2, extra map slot
/arch/ia64/lib/
copy_page.S:43 t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH]
80 (p[0]) ld8 t5[0]=[src1],16
81 (EPI) st8 [tgt1]=t5[PIPE_DEPTH-1],16
copy_page_mck.S:79 #define t5 t1 // alias! macro
82 #define t9 t5 // alias!
151 (p[D]) ld8 t5 = [src0], 8
158 (p[D]) st8 [dst0] = t5, 8
memcpy_mck.S:47 #define t5 t1 // alias! macro
51 #define t9 t5 // alias!
230 EX(.ex_handler, (p[D]) ld8 t5 = [src0], 8)
237 EX(.ex_handler, (p[D]) st8 [dst0] = t5, 8)
436 EX(.ex_handler_short, (p6) ld1 t5=[src0],2)
441 EX(.ex_handler_short, (p6) st1 [dst0]=t5,2)
480 EK(.ex_handler_short, (p10) ld1 t5=[src0],2)
488 EK(.ex_handler_short, (p10) st1 [dst0] = t5,2)
/arch/alpha/include/asm/
regdef.h:11 #define t5 $6 macro
/arch/mips/kernel/
scall32-o32.S:128 lw t5, TI_ADDR_LIMIT($28)
130 and t5, t4
131 bltz t5, bad_stack # -> sp is bad
140 1: lw t5, 16(t0) # argument #5 from usp
151 sw t5, 16(sp) # argument #5 to ksp
280 lw t5, 24(sp)
283 sw t5, 20(sp)
/arch/mips/include/asm/
regdef.h:33 #define t5 $13 macro
/arch/sparc/lib/
memcpy.S:45 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
55 st %t5, [%dst + (offset) + 0x14]; \
59 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
90 #define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
100 st %t5, [%dst - (offset) - 0x0c]; \
104 #define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
128 #define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ argument
131 srl %t0, shir, %t5; \
134 or %t5, %prev, %t5; \
146 #define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ argument
[all …]
checksum_32.S:18 #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ argument
28 addxcc t5, sum, sum; \
191 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
204 addxcc t5, sum, sum; \
213 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
228 st t5, [dst + off + 0x14]; \
229 addxcc t5, sum, sum; \
blockops.S:25 #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
copy_user.S:67 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
77 st %t5, [%dst + (offset) + 0x14]; \
81 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
/arch/mips/lib/
csum_partial.S:32 #define t5 $13 macro
444 EXC( LOAD t5, UNIT(5)(src), .Ll_exc_copy)
459 EXC( STORE t5, UNIT(5)(dst), .Ls_exc)
460 ADDC(sum, t5)
memcpy-inatomic.S:133 #define t5 $13 macro
memcpy.S:133 #define t5 $13 macro
/arch/mips/cavium-octeon/
octeon-memcpy.S:118 #define t5 $13 macro