/arch/alpha/lib/ |
D | ev67-strrchr.S |
      36  insbl a1, 2, t5        # U : 0000000000ch0000
      41  sll t5, 8, t3          # U : 00000000ch000000
      45  or t5, t3, t3          # E : 00000000chch0000
      52  lda t5, -1             # E : build garbage mask
      55  mskqh t5, a0, t4       # E : Complete garbage mask
      85  subq t4, 1, t5         # E : build a mask of the bytes upto...
      86  or t4, t5, t4          # E : ... and including the null
     101  lda t5, 0x3f($31)      # E :
     102  subq t5, t2, t5        # E : Normalize leading zero count
     104  addq t6, t5, v0        # E : and add to quadword address
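The subq/or pair at lines 85-86 is the usual trick for turning a word whose lowest set bit marks the terminating null into a mask that covers every byte position up to and including it. A minimal C sketch of that step alone (illustrative, not the kernel's code):

    #include <stdint.h>

    /* Given a nonzero flag word whose lowest set bit marks the null byte,
     * extend it downward over everything up to and including that bit:
     * subq t4, 1, t5 ; or t4, t5, t4  is  t4 |= t4 - 1. */
    static uint64_t mask_through_null(uint64_t found)
    {
        return found | (found - 1);
    }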
|
D | strrchr.S |
      23  sll a1, 8, t5          # e0 : replicate our test character
      25  or t5, a1, a1          # e0 :
      27  sll a1, 16, t5         # e0 :
      29  or t5, a1, a1          # e0 :
      31  sll a1, 32, t5         # e0 :
      34  or t5, a1, a1          # .. e1 : character replication complete
      57  subq t4, 1, t5         # e0 : build a mask of the bytes upto...
      58  or t4, t5, t4          # e1 : ... and including the null
|
D | ev67-strchr.S |
      33  insbl a1, 1, t5        # U : 000000000000ch00
      37  or t5, t3, a1          # E : 000000000000chch
      43  inswl a1, 2, t5        # E : 00000000chch0000
      47  or a3, t5, t5          # E : 0000chchchch0000
      52  or t5, a1, a1          # E : chchchchchchchch
|
D | strchr.S |
      23  sll a1, 8, t5          # e0 : replicate the search character
      25  or t5, a1, a1          # e0 :
      27  sll a1, 16, t5         # e0 :
      30  or t5, a1, a1          # .. e1 :
      31  sll a1, 32, t5         # e0 :
      33  or t5, a1, a1          # e0 :
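All four strchr/strrchr variants above start the same way: the search character is smeared into every byte lane of a 64-bit word so that a single byte-compare instruction can test eight bytes per iteration. A hedged C equivalent of the sll/or replication chain (function name is illustrative):

    #include <stdint.h>

    /* Replicate one byte into all eight byte lanes of a 64-bit word,
     * mirroring the shift/or chain above: ch -> chch -> chchchch -> ... */
    static uint64_t replicate_byte(uint8_t ch)
    {
        uint64_t v = ch;

        v |= v << 8;     /* 000000000000chch */
        v |= v << 16;    /* 00000000chchchch */
        v |= v << 32;    /* chchchchchchchch */
        return v;
    }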
|
D | stxcpy.S |
     238  and a1, 7, t5          # e0 : find src misalignment
     255  cmplt t4, t5, t12      # e0 :
     259  mskqh t2, t5, t2       # e0 :
     274  and a1, 7, t5          # .. e1 :
     277  srl t12, t5, t12       # e0 : adjust final null return value
|
D | ev6-stxcpy.S |
     268  and a1, 7, t5          # E : find src misalignment
     286  cmplt t4, t5, t12      # E :
     290  mskqh t2, t5, t2       # U :
     303  and a1, 7, t5          # E :
     307  srl t12, t5, t12       # U : adjust final null return value
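In both stxcpy variants t5 holds the source pointer's offset within its quadword (`and a1, 7, t5`); the copy then runs on aligned 8-byte loads and uses that offset to mask and shift the ragged edges. Roughly, in C (the helper and its names are illustrative, not the kernel's interface):

    #include <stdint.h>

    /* Split a byte pointer into an 8-byte-aligned base plus the 0..7
     * byte offset that `and a1, 7, t5` extracts. */
    static void split_alignment(const char *src, const uint64_t **aligned,
                                unsigned *misalign)
    {
        uintptr_t p = (uintptr_t)src;

        *misalign = (unsigned)(p & 7);
        *aligned  = (const uint64_t *)(p & ~(uintptr_t)7);
    }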
|
D | strncpy_from_user.S |
     265  and a1, 7, t5          # e0 : find src misalignment
     282  cmplt t4, t5, t12      # e1 :
     287  mskqh t2, t5, t2       # e0 : begin src byte validity mask
     290  or t8, t10, t5         # .. e1 : test for end-of-count too
     292  cmoveq a2, t5, t8      # .. e1 :
|
D | ev6-strncpy_from_user.S |
     324  and a1, 7, t5          # E : find src misalignment
     345  cmplt t4, t5, t12      # E :
     350  mskqh t2, t5, t2       # U : begin src byte validity mask
     355  or t8, t10, t5         # E : test for end-of-count too
     357  cmoveq a2, t5, t8      # E : Latency=2, extra map slot
|
D | stxncpy.S |
     295  and a1, 7, t5          # e0 : find src misalignment
     311  1: cmplt t4, t5, t12   # e1 :
     319  or t8, t10, t5         # .. e1 : test for end-of-count too
     321  cmoveq a2, t5, t8      # .. e1 :
|
D | ev6-stxncpy.S |
     340  and a1, 7, t5          # E : find src misalignment
     358  1: cmplt t4, t5, t12   # E :
     366  or t8, t10, t5         # E : test for end-of-count too
     369  cmoveq a2, t5, t8      # E : Latency=2, extra map slot
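The bounded copies above merge two stop conditions without a branch: t8 flags a null byte in the current word, t10 flags the end of the byte count, and the or/cmoveq pair makes the count limit look like a null once the remaining count reaches zero. A small branch-free C sketch of that merge (names are illustrative):

    #include <stdint.h>

    /* or     t8, t10, t5  combines the two flag sets;
     * cmoveq a2, t5, t8   adopts the combination only when the
     * remaining count (a2) is zero. */
    static uint64_t merge_stop_flags(uint64_t null_flags, uint64_t count_flags,
                                     uint64_t remaining)
    {
        uint64_t both = null_flags | count_flags;

        return remaining == 0 ? both : null_flags;
    }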
|
/arch/ia64/lib/ |
D | copy_page.S |
      43  t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH]
      80  (p[0]) ld8 t5[0]=[src1],16
      81  (EPI) st8 [tgt1]=t5[PIPE_DEPTH-1],16
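Here t5 is not a scratch register but a rotating register array: line 43 declares it PIPE_DEPTH entries deep, line 80 loads into stage 0, and line 81 stores from the last stage, so loads run PIPE_DEPTH iterations ahead of the stores that consume them. A rough C model of that modulo-scheduled loop, with an explicit circular buffer standing in for the rotating registers (the depth and names here are illustrative, not the file's actual values):

    #include <stddef.h>
    #include <stdint.h>

    #define DEPTH 4  /* illustrative pipeline depth */

    /* Loads run DEPTH iterations ahead of stores; in-flight values sit
     * in a small circular buffer, like the rotating t*[PIPE_DEPTH]. */
    static void pipelined_copy(uint64_t *dst, const uint64_t *src, size_t words)
    {
        uint64_t inflight[DEPTH];
        size_t i;

        for (i = 0; i < words + DEPTH; i++) {
            if (i >= DEPTH)                    /* kernel + epilogue: store */
                dst[i - DEPTH] = inflight[i % DEPTH];
            if (i < words)                     /* prologue + kernel: load  */
                inflight[i % DEPTH] = src[i];
        }
    }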
|
D | copy_page_mck.S |
      79  #define t5 t1 // alias!                  macro
      82  #define t9 t5 // alias!
     151  (p[D]) ld8 t5 = [src0], 8
     158  (p[D]) st8 [dst0] = t5, 8
|
D | memcpy_mck.S |
      47  #define t5 t1 // alias!                  macro
      51  #define t9 t5 // alias!
     230  EX(.ex_handler, (p[D]) ld8 t5 = [src0], 8)
     237  EX(.ex_handler, (p[D]) st8 [dst0] = t5, 8)
     436  EX(.ex_handler_short, (p6) ld1 t5=[src0],2)
     441  EX(.ex_handler_short, (p6) st1 [dst0]=t5,2)
     480  EK(.ex_handler_short, (p10) ld1 t5=[src0],2)
     488  EK(.ex_handler_short, (p10) st1 [dst0] = t5,2)
|
/arch/alpha/include/asm/ |
D | regdef.h | 11 #define t5 $6 macro
|
/arch/mips/kernel/ |
D | scall32-o32.S |
     128  lw t5, TI_ADDR_LIMIT($28)
     130  and t5, t4
     131  bltz t5, bad_stack      # -> sp is bad
     140  1: lw t5, 16(t0)        # argument #5 from usp
     151  sw t5, 16(sp)           # argument #5 to ksp
     280  lw t5, 24(sp)
     283  sw t5, 20(sp)
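In the o32 syscall path t5 is an ordinary temporary: lines 128-131 check the user stack pointer against the thread's address limit, and lines 140-151 move argument #5 (and its neighbours) from the user stack into the kernel stack frame. A loose C model of that flow; the function, its names, and the exact form of the limit check are simplified, and the real code does the user loads through exception fixups rather than a plain loop:

    #include <stddef.h>

    /* Reject a user sp that reaches past the address limit (-> bad_stack),
     * then copy syscall arguments #5..#8 from the user stack (usp) into
     * the kernel stack frame (ksp). */
    static int fetch_stack_args(unsigned long *ksp, const unsigned long *usp,
                                unsigned long addr_limit, unsigned nargs)
    {
        unsigned i;

        if ((unsigned long)usp + 8 * sizeof(long) > addr_limit)
            return -1;
        for (i = 4; i < nargs; i++)     /* argument #5 sits at 16(usp) */
            ksp[i] = usp[i];
        return 0;
    }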
|
/arch/mips/include/asm/ |
D | regdef.h | 33 #define t5 $13 macro
|
/arch/sparc/lib/ |
D | memcpy.S |
      45  #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \        argument
      55  st %t5, [%dst + (offset) + 0x14]; \
      59  #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \   argument
      90  #define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \       argument
     100  st %t5, [%dst - (offset) - 0x0c]; \
     104  #define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \  argument
     128  #define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \       argument
     131  srl %t0, shir, %t5; \
     134  or %t5, %prev, %t5; \
     146  #define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \  argument
     [all …]
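In the SPARC copy routines t5 is just one parameter of the big unrolled-chunk macros: MOVE_BIGCHUNK pulls a 32-byte chunk into eight temporaries and then stores them, with %t5 carrying the word that lands at offset + 0x14. A hedged C rendering of that pattern (word-sized C for clarity; the macro's actual load/store widths differ, and the function name is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Copy one 32-byte chunk by loading it entirely into temporaries and
     * then storing them, so loads and stores each issue back to back.
     * Word index 5 corresponds to byte offset 0x14, the slot %t5 fills. */
    static void move_bigchunk(uint32_t *dst, const uint32_t *src, size_t word_off)
    {
        uint32_t t0 = src[word_off + 0], t1 = src[word_off + 1];
        uint32_t t2 = src[word_off + 2], t3 = src[word_off + 3];
        uint32_t t4 = src[word_off + 4], t5 = src[word_off + 5];
        uint32_t t6 = src[word_off + 6], t7 = src[word_off + 7];

        dst[word_off + 0] = t0; dst[word_off + 1] = t1;
        dst[word_off + 2] = t2; dst[word_off + 3] = t3;
        dst[word_off + 4] = t4; dst[word_off + 5] = t5;
        dst[word_off + 6] = t6; dst[word_off + 7] = t7;
    }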
|
D | checksum_32.S |
      18  #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \                        argument
      28  addxcc t5, sum, sum; \
     191  #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \  argument
     204  addxcc t5, sum, sum; \
     213  #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \          argument
     228  st t5, [dst + off + 0x14]; \
     229  addxcc t5, sum, sum; \
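The addxcc chains accumulate an Internet-style ones'-complement sum: each word is added with the carry from the previous add folded back in via the SPARC carry flag. A rough C equivalent of what one chunk contributes (the real macros also interleave the stores of the copy; names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* 32-bit ones'-complement accumulation: add each word and fold the
     * carry-out straight back into the running sum. */
    static uint32_t csum_add(uint32_t sum, const uint32_t *buf, size_t words)
    {
        size_t i;

        for (i = 0; i < words; i++) {
            uint64_t t = (uint64_t)sum + buf[i];
            sum = (uint32_t)t + (uint32_t)(t >> 32);   /* end-around carry */
        }
        return sum;
    }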
|
D | blockops.S | 25 #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
|
D | copy_user.S |
      67  #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \       argument
      77  st %t5, [%dst + (offset) + 0x14]; \
      81  #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \  argument
|
/arch/mips/lib/ |
D | csum_partial.S |
      32  #define t5 $13                           macro
     444  EXC( LOAD t5, UNIT(5)(src), .Ll_exc_copy)
     459  EXC( STORE t5, UNIT(5)(dst), .Ls_exc)
     460  ADDC(sum, t5)
|
D | memcpy-inatomic.S | 133 #define t5 $13 macro
|
D | memcpy.S | 133 #define t5 $13 macro
|
/arch/mips/cavium-octeon/ |
D | octeon-memcpy.S | 118 #define t5 $13 macro
|