/arch/alpha/lib/

D | stxcpy.S |
    72 negq t8, t6 # e0 : find low bit set
    73 and t8, t6, t12 # e1 (stall)
    77 and t12, 0x80, t6 # e0 :
    78 bne t6, 1f # .. e1 (zdb)
    83 subq t12, 1, t6 # .. e1 :
    84 zapnot t1, t6, t1 # e0 : clear src bytes >= null
    85 or t12, t6, t8 # .. e1 :
    141 or t1, t6, t6 # e0 :
    142 cmpbge zero, t6, t8 # .. e1 :
    143 lda t6, -1 # e0 : for masking just below
    [all …]

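The t6/t8/t12 dance above is the classic Alpha zero-byte hunt: cmpbge produces a per-byte bitmask of zero bytes, negq/and isolates the lowest set bit (the first NUL), and subq/zapnot clears every source byte from the NUL onward. A minimal C sketch of the same trick, assuming a 64-bit little-endian word; the helper names are illustrative, not from the kernel:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Analogue of "cmpbge zero, x, t8": bit i of the result is set if
       byte i of x is zero (SWAR zero-byte detector).  Unlike cmpbge,
       bits above the first zero byte may be spurious, but the lowest
       set bit is exact, and that is all the code below uses. */
    static uint64_t zero_byte_bits(uint64_t x)
    {
        uint64_t flags = (x - 0x0101010101010101ULL) & ~x
                         & 0x8080808080808080ULL;
        uint64_t bits = 0;
        for (int i = 0; i < 8; i++)
            if (flags & (0x80ULL << (8 * i)))
                bits |= 1ULL << i;
        return bits;
    }

    int main(void)
    {
        uint64_t word;
        memcpy(&word, "ab\0cdefg", 8);   /* little-endian assumed */

        uint64_t t8  = zero_byte_bits(word);
        uint64_t t12 = t8 & -t8;         /* negq/and: lowest set bit  */
        uint64_t t6  = t12 - 1;          /* subq: bytes below the NUL */

        /* "zapnot t1, t6, t1": keep only the bytes before the NUL. */
        uint64_t keep = 0;
        for (int i = 0; i < 8; i++)
            if (t6 & (1ULL << i))
                keep |= 0xffULL << (8 * i);
        word &= keep;

        /* __builtin_ctzll is a GCC/Clang builtin; t8 is nonzero here. */
        printf("first NUL at byte %d\n", __builtin_ctzll(t8));
        return 0;
    }
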
D | stxncpy.S |
    90 and t12, 0x80, t6 # e0 :
    91 bne t6, 1f # .. e1 (zdb)
    96 subq t12, 1, t6 # .. e1 :
    97 or t12, t6, t8 # e0 :
    164 or t0, t6, t6 # e1 : mask original data for zero test
    165 cmpbge zero, t6, t8 # e0 :
    167 lda t6, -1 # e0 :
    170 mskql t6, a1, t6 # e0 : mask out bits already seen
    173 or t6, t2, t2 # .. e1 :
    242 or t8, t10, t6 # e1 :
    [all …]

D | ev6-stxcpy.S |
    88 negq t8, t6 # E : find low bit set
    89 and t8, t6, t12 # E : (stall)
    92 and t12, 0x80, t6 # E : (stall)
    93 bne t6, 1f # U : (stall)
    98 subq t12, 1, t6 # E :
    99 zapnot t1, t6, t1 # U : clear src bytes >= null (stall)
    100 or t12, t6, t8 # E : (stall)
    164 or t1, t6, t6 # E :
    165 cmpbge zero, t6, t8 # E : (stall)
    166 lda t6, -1 # E : for masking just below
    [all …]

D | ev6-stxncpy.S |
    116 and t12, 0x80, t6 # E : (stall)
    117 bne t6, 1f # U : (stall)
    122 subq t12, 1, t6 # E :
    123 or t12, t6, t8 # E : (stall)
    204 or t0, t6, t6 # E : mask original data for zero test (stall)
    206 cmpbge zero, t6, t8 # E :
    208 lda t6, -1 # E :
    212 mskql t6, a1, t6 # U : mask out bits already seen
    214 or t6, t2, t2 # E : (stall)
    287 or t8, t10, t6 # E : (stall)
    [all …]

D | strrchr.S |
    22 mov zero, t6 # .. e1 : t6 is last match aligned addr
    45 cmovne t3, v0, t6 # .. e1 : save previous comparisons match
    62 cmovne t3, v0, t6 # e0 :
    79 addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix
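Here t6 latches the address of the most recent word containing a match via cmovne, so the scan loop stays branch-free; whatever was latched last wins once the terminator is found. A byte-at-a-time C sketch of the same latching idea (the real code compares a whole quadword per iteration); the function name is illustrative:

    #include <stddef.h>

    const char *strrchr_sketch(const char *s, int c)
    {
        const char *last = NULL;          /* "mov zero, t6"          */
        for (const char *p = s; ; p++) {
            int match = (*p == (char)c);
            last = match ? p : last;      /* "cmovne t3, v0, t6"     */
            if (*p == '\0')
                return last;              /* last latched match wins */
        }
    }
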
D | ev67-strrchr.S |
    39 mov zero, t6 # E : t6 is last match aligned addr
    67 cmovne t3, v0, t6 # E : save previous comparisons match
    93 cmovne t3, v0, t6 # E :
    104 addq t6, t5, v0 # E : and add to quadword address

/arch/ia64/lib/

D | copy_page.S |
    44 t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH]
    83 (p[0]) ld8 t6[0]=[src2],16
    84 (EPI) st8 [tgt2]=t6[PIPE_DEPTH-1],16
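t6[PIPE_DEPTH] is an ia64 rotating register: the load stage writes t6[0], and the store stage consumes the same value PIPE_DEPTH-1 iterations later as t6[PIPE_DEPTH-1], keeping loads running ahead of stores. A C analogue using a ring buffer in place of register rotation; PIPE_DEPTH of 3 and the function name are illustrative choices, not the tuned kernel values:

    #include <stdint.h>
    #include <stddef.h>

    #define PIPE_DEPTH 3

    void pipelined_copy(uint64_t *dst, const uint64_t *src, size_t nwords)
    {
        uint64_t t6[PIPE_DEPTH];
        size_t total = nwords + PIPE_DEPTH - 1;

        for (size_t i = 0; i < total; i++) {
            if (i < nwords)                  /* "(p[0]) ld8" stage */
                t6[i % PIPE_DEPTH] = src[i];
            if (i >= PIPE_DEPTH - 1) {       /* "(EPI) st8" stage  */
                size_t j = i - (PIPE_DEPTH - 1);
                dst[j] = t6[j % PIPE_DEPTH]; /* loaded D-1 iters ago */
            }
        }
    }
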
D | copy_page_mck.S |
    81 #define t6 t2 // alias! macro
    86 #define t12 t6 // alias!
    157 (p[D]) ld8 t6 = [src0], 3*8
    164 (p[D]) st8 [dst0] = t6, 3*8

D | memcpy_mck.S |
    49 #define t6 t2 // alias! macro
    55 #define t12 t6 // alias!
    237 EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8)
    244 EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8)
    439 EK(.ex_handler_short, (p8) ld1 t6=[src1],2)
    444 EK(.ex_handler_short, (p8) st1 [dst1]=t6,2)
    486 EX(.ex_handler_short, (p11) ld1 t6=[src1],2)
    493 EX(.ex_handler_short, (p11) st1 [dst1] = t6,2)

/arch/sparc/lib/

D | blockops.S |
    27 #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    31 ldd [src + offset + 0x00], t6; \
    35 std t6, [dst + offset + 0x00];
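In these sparc32 block-move macros each ldd/std pair moves a 64-bit doubleword through an even/odd register pair (t6 names the even half, t7 the odd). A hedged C sketch of one unrolled 32-byte chunk in the style of MOVE_BIGCHUNK from memcpy.S below; the function name is illustrative:

    #include <stdint.h>

    /* Copy one 32-byte chunk through four 64-bit temporaries, the C
       analogue of four ldd/std pairs at offsets 0x00..0x18. */
    static void move_bigchunk(uint64_t *dst, const uint64_t *src)
    {
        uint64_t t0 = src[0];   /* ldd [src + 0x00], t0  (t0/t1 pair) */
        uint64_t t2 = src[1];   /* ldd [src + 0x08], t2  (t2/t3 pair) */
        uint64_t t4 = src[2];   /* ldd [src + 0x10], t4  (t4/t5 pair) */
        uint64_t t6 = src[3];   /* ldd [src + 0x18], t6  (t6/t7 pair) */
        dst[0] = t0;
        dst[1] = t2;
        dst[2] = t4;
        dst[3] = t6;
    }
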
D | memcpy.S |
    18 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    22 ldd [%src + (offset) + 0x18], %t6; \
    29 st %t6, [%dst + (offset) + 0x18]; \
    32 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    36 ldd [%src + (offset) + 0x18], %t6; \
    40 std %t6, [%dst + (offset) + 0x18];

D | copy_user.S |
    68 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    72 ldd [%src + (offset) + 0x18], %t6; \
    79 st %t6, [%dst + (offset) + 0x18]; \
    82 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    86 ldd [%src + (offset) + 0x18], %t6; \
    90 std %t6, [%dst + (offset) + 0x18];

D | checksum_32.S |
    193 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    199 ldd [src + off + 0x18], t6; \
    207 std t6, [dst + off + 0x18]; \
    208 addxcc t6, sum, sum; \
    215 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    219 ldd [src + off + 0x18], t6; \
    232 st t6, [dst + off + 0x18]; \
    233 addxcc t6, sum, sum; \
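CSUMCOPY_BIGCHUNK interleaves the copy with an add-with-carry (addxcc) chain, building the ones'-complement checksum while the data moves. A C sketch of the same idea that folds the carry back in after each addition instead of chaining condition-code flags; the function name is illustrative:

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t csum_copy(uint32_t *dst, const uint32_t *src,
                              size_t nwords, uint32_t sum)
    {
        for (size_t i = 0; i < nwords; i++) {
            uint32_t w = src[i];
            dst[i] = w;                          /* st t6, [dst + off]  */
            uint64_t s = (uint64_t)sum + w;      /* addxcc t6, sum, sum */
            sum = (uint32_t)s + (uint32_t)(s >> 32); /* end-around carry */
        }
        return sum;
    }
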
/arch/x86/crypto/

D | camellia-aesni-avx-asm_64.S |
    51 #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
    74 filter_8bit(x0, t0, t1, t7, t6); \
    75 filter_8bit(x7, t0, t1, t7, t6); \
    76 filter_8bit(x1, t0, t1, t7, t6); \
    77 filter_8bit(x4, t0, t1, t7, t6); \
    78 filter_8bit(x2, t0, t1, t7, t6); \
    79 filter_8bit(x5, t0, t1, t7, t6); \
    83 filter_8bit(x3, t2, t3, t7, t6); \
    84 filter_8bit(x6, t2, t3, t7, t6); \
    101 filter_8bit(x0, t0, t1, t7, t6); \
    [all …]
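filter_8bit() applies a byte substitution across a whole vector by splitting each byte into nibbles, looking each nibble up in a 16-entry table with vpshufb, and xoring the two results (t6 serves as scratch here). A scalar C sketch of that nibble-split lookup; the table contents are placeholders, not the real Camellia pre/post-filter constants:

    #include <stdint.h>

    /* One pass over a 16-byte block, the scalar analogue of the
       vpand/vpsrld/vpshufb/vpxor sequence inside filter_8bit. */
    static void filter_8bit_sketch(uint8_t block[16],
                                   const uint8_t tbl_lo[16],
                                   const uint8_t tbl_hi[16])
    {
        for (int i = 0; i < 16; i++) {
            uint8_t b = block[i];
            block[i] = tbl_lo[b & 0x0f] ^ tbl_hi[b >> 4];
        }
    }
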
D | camellia-aesni-avx2-asm_64.S |
    68 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
    76 vbroadcasti128 .Lpre_tf_hi_s1, t6; \
    92 filter_8bit(x0, t5, t6, t7, t4); \
    93 filter_8bit(x7, t5, t6, t7, t4); \
    100 filter_8bit(x2, t5, t6, t7, t4); \
    101 filter_8bit(x5, t5, t6, t7, t4); \
    102 filter_8bit(x1, t5, t6, t7, t4); \
    103 filter_8bit(x4, t5, t6, t7, t4); \
    108 vextracti128 $1, x2, t6##_x; \
    127 vaesenclast t4##_x, t6##_x, t6##_x; \
    [all …]

/arch/alpha/include/uapi/asm/

D | regdef.h |
    12 #define t6 $7 macro

/arch/mips/include/asm/

D | regdef.h |
    38 #define t6 $14 macro

/arch/mips/lib/

D | memcpy.S |
    176 #define t6 $14 macro
    564 bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
    678 li t6, 1
    692 li t6, 0 /* not inatomic */
    708 li t6, 1
    716 li t6, 0 /* not inatomic */
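Here t6 carries a flag rather than data: the copy entry points load it with 1 for inatomic callers and 0 otherwise, and the fault fixup zeroes the uncopied tail of the destination only in the non-inatomic case. A hedged C sketch of that fixup logic; the function name is illustrative:

    #include <stddef.h>
    #include <string.h>

    /* Called after a faulting copy: 'copied' bytes made it across.
       The "bnez t6, .Ldone" above skips the zeroing when inatomic. */
    static size_t copy_fixup(void *dst, size_t copied, size_t len,
                             int inatomic)
    {
        if (!inatomic)
            memset((char *)dst + copied, 0, len - copied);
        return len - copied;    /* bytes left uncopied */
    }
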
D | csum_partial.S |
    34 #define t6 $14 macro
    502 LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
    518 STORE(t6, UNIT(6)(dst), .Ls_exc\@)
    519 ADDC(t6, t7)
    521 ADDC(sum, t6)

/arch/arm64/include/asm/

D | assembler.h |
    397 .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
    400 ldp \t5, \t6, [\src, #32]
    405 stnp \t5, \t6, [\dest, #32]
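The copy_page macro streams a page 64 bytes per iteration through eight temporaries, pairing ldp loads with stnp non-temporal stores so the destination does not pollute the cache. A C analogue of the unrolled loop; the 4 KiB page size is an assumption:

    #include <stdint.h>

    #define PAGE_SIZE 4096

    void copy_page_sketch(uint64_t *dest, const uint64_t *src)
    {
        /* 64 bytes (eight 64-bit words) per iteration, like the
           four ldp/stnp pairs in the macro. */
        for (int i = 0; i < PAGE_SIZE / 8; i += 8) {
            uint64_t t1 = src[i],     t2 = src[i + 1];
            uint64_t t3 = src[i + 2], t4 = src[i + 3];
            uint64_t t5 = src[i + 4], t6 = src[i + 5];
            uint64_t t7 = src[i + 6], t8 = src[i + 7];
            dest[i]     = t1; dest[i + 1] = t2;
            dest[i + 2] = t3; dest[i + 3] = t4;
            dest[i + 4] = t5; dest[i + 5] = t6;
            dest[i + 6] = t7; dest[i + 7] = t8;
        }
    }
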
/arch/mips/kernel/

D | scall32-o32.S |
    68 load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
    74 sw t6, 20(sp) # argument #6 to ksp
    164 li t6, 0
    206 lw t6, 28(sp)
    209 sw t6, 24(sp)
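Under the o32 ABI only four syscall arguments travel in registers; arguments 5 through 8 sit on the user stack, so the entry path copies them to the kernel stack before dispatch (t6 carries argument 6 above). A hedged C sketch of that shuffle; fetch_user_word() is a hypothetical stand-in for the user_lw() accessor:

    #include <stdint.h>

    /* Hypothetical accessor: returns 0 on success and fills *val
       from a user-space address, like user_lw(). */
    extern int fetch_user_word(uint32_t *val, const uint32_t *uaddr);

    /* Copy syscall arguments 5..8 from the user stack (usp) onto
       the kernel stack (ksp), as the load_a5.. path above does. */
    static int copy_extra_args(uint32_t *ksp, const uint32_t *usp)
    {
        for (int i = 4; i < 8; i++) {
            uint32_t v;
            if (fetch_user_word(&v, &usp[i]))
                return -1;   /* fault: the real code returns EFAULT */
            ksp[i] = v;
        }
        return 0;
    }
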
D | pm-cps.c |
    78 t0, t1, t2, t3, t4, t5, t6, t7, enumerator

/arch/tile/kernel/

D | hvglue_trace.c |
    159 #define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__) argument
    168 #define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__) argument
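Here t6 is a macro parameter, not a register: each __HV_DECLn/__HV_PASSn level peels one (type, name) pair off a variadic list, recursively building either a parameter declaration list or a call-argument list. A minimal two-level C sketch of the same peeling scheme; the macro and function names are illustrative:

    #include <stdio.h>

    #define DECL1(t1, a1)      t1 a1
    #define DECL2(t2, a2, ...) t2 a2, DECL1(__VA_ARGS__)
    #define PASS1(t1, a1)      a1
    #define PASS2(t2, a2, ...) a2, PASS1(__VA_ARGS__)

    static int impl(long x, int y) { return (int)x + y; }

    /* Expands to: static int wrapped(long x, int y)
                   { return impl(x, y); } */
    static int wrapped(DECL2(long, x, int, y))
    {
        return impl(PASS2(long, x, int, y));
    }

    int main(void) { printf("%d\n", wrapped(40, 2)); return 0; }
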
/arch/mips/cavium-octeon/

D | octeon-memcpy.S |
    114 #define t6 $14 macro