/arch/alpha/lib/ |
D | ev67-strrchr.S |
    35  insbl a1, 1, t4 # U : 000000000000ch00
    40  or t2, t4, a1 # E : 000000000000chch
    47  sll a1, 48, t4 # U : chch000000000000
    49  or t4, a1, a1 # E : chch00000000chch
    55  mskqh t5, a0, t4 # E : Complete garbage mask
    57  cmpbge zero, t4, t4 # E : bits set iff byte is garbage
    60  andnot t1, t4, t1 # E : clear garbage from null test
    61  andnot t3, t4, t3 # E : clear garbage from char test
    83  negq t1, t4 # E : isolate first null byte match
    84  and t1, t4, t4 # E :
    [all …]
|
D | strrchr.S |
    30  lda t4, -1 # .. e1 : build garbage mask
    33  mskqh t4, a0, t4 # e0 :
    36  cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
    38  andnot t1, t4, t1 # .. e1 : clear garbage from null test
    39  andnot t3, t4, t3 # e0 : clear garbage from char test
    55  negq t1, t4 # e0 : isolate first null byte match
    56  and t1, t4, t4 # e1 :
    57  subq t4, 1, t5 # e0 : build a mask of the bytes up to...
    58  or t4, t5, t4 # e1 : ... and including the null
    60  and t3, t4, t3 # e0 : mask out char matches after null
|
D | strchr.S |
    26  lda t4, -1 # .. e1 : build garbage mask
    29  mskqh t4, a0, t4 # e0 :
    32  cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
    37  andnot t0, t4, t0 # e0 : clear garbage bits
    57  and t0, 0xaa, t4 # e0 :
    60  cmovne t4, 1, t4 # .. e1 :
    62  addq v0, t4, v0 # .. e1 :
|
D | ev67-strchr.S |
    39  lda t4, -1 # E : build garbage mask
    41  mskqh t4, a0, t4 # U : only want relevant part of first quad
    49  cmpbge zero, t4, t4 # E : bits set iff byte is garbage
    57  andnot t0, t4, t0 # E : clear garbage bits
|
D | stxcpy.S |
    135  extqh t2, a1, t4 # e0 :
    137  or t1, t4, t1 # .. e1 :
    237  and a0, 7, t4 # .. e1 : find dest misalignment
    245  beq t4, 1f # .. e1 :
    250  subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
    255  cmplt t4, t5, t12 # e0 :
|
D | stxncpy.S |
    160  extqh t2, a1, t4 # e0 :
    161  or t1, t4, t1 # e1 : first aligned src word complete
    294  and a0, 7, t4 # .. e1 : find dest misalignment
    302  beq t4, 1f # .. e1 :
    306  subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
    311  1: cmplt t4, t5, t12 # e1 :
    332  mskqh t1, t4, t1 # e0 :
|
D | ev6-stxcpy.S |
    157  extqh t2, a1, t4 # U : (stall on a1)
    160  or t1, t4, t1 # E :
    267  and a0, 7, t4 # E : find dest misalignment
    274  beq t4, 1f # U :
    283  subq a1, t4, a1 # E : sub dest misalignment from src addr
    286  cmplt t4, t5, t12 # E :
|
D | ev6-stxncpy.S |
    199  extqh t2, a1, t4 # U : (3 cycle stall on t2)
    201  or t1, t4, t1 # E : first aligned src word complete (stall)
    339  and a0, 7, t4 # E : find dest misalignment
    346  beq t4, 1f # U :
    353  subq a1, t4, a1 # E : sub dest misalignment from src addr
    358  1: cmplt t4, t5, t12 # E :
    380  mskqh t1, t4, t1 # U :
|
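The Alpha str[r]chr entries above all rely on the same trick: load whole aligned quadwords, let cmpbge produce a per-byte "this byte is zero" bitmask, and clear the bits belonging to the "garbage" bytes that precede an unaligned start address. A minimal C sketch of that idea follows; it is illustrative only (names are mine, not the kernel's) and assumes a little-endian machine, GCC-style builtins, and the same "aligned loads never fault" reasoning the assembly uses.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Emulation of Alpha "cmpbge zero, x": bit i of the result is set
 * iff byte i of x is zero. */
static unsigned zero_byte_mask(uint64_t x)
{
	unsigned m = 0;

	for (int i = 0; i < 8; i++)
		if (((x >> (8 * i)) & 0xff) == 0)
			m |= 1u << i;
	return m;
}

/* strlen() one aligned quadword at a time.  Bytes below the (possibly
 * unaligned) start address are "garbage": their bits are cleared from
 * the zero-byte mask, which is what the mskqh/cmpbge/andnot sequence
 * does in the assembly.  Note: like the assembly, this reads whole
 * aligned quadwords past the end of the string, which is not strictly
 * conforming C. */
static size_t strlen_by_quadword(const char *s)
{
	uintptr_t a = (uintptr_t)s;
	const uint64_t *p = (const uint64_t *)(a & ~(uintptr_t)7);
	uint64_t q;
	unsigned zeros;

	memcpy(&q, p, sizeof(q));		/* first, possibly partial, quad */
	zeros = zero_byte_mask(q) & ~((1u << (a & 7)) - 1u);

	while (!zeros) {
		p++;
		memcpy(&q, p, sizeof(q));	/* later quads: no garbage bytes */
		zeros = zero_byte_mask(q);
	}
	/* Lowest set bit = offset of the first NUL within that quad. */
	return (const char *)p + __builtin_ctz(zeros) - s;
}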
/arch/ia64/lib/ |
D | memcpy.S |
    35  # define t4 r22 macro
    189  sub t4=r0,dst // t4 = -dst
    193  shl t4=t4,3 // t4 = 8*(dst & 7)
    201  mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7)
    224  mov t4=ip
    227  adds t4=.memcpy_loops-1b,t4
    242  add t4=t0,t4
    254  mov b6=t4
|
D | copy_page.S |
    43  .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH], t3[PIPE_DEPTH], t4[PIPE_DEPTH], \
    78  (p[0]) ld8 t4[0]=[src2],16
    79  (EPI) st8 [tgt2]=t4[PIPE_DEPTH-1],16
|
D | copy_page_mck.S |
    79  #define t4 r20 macro
    84  #define t10 t4 // alias!
    143  (p[D]) ld8 t4 = [src1], 3*8 // M1
    155  (p[D]) st8 [dst1] = t4, 3*8
|
D | memcpy_mck.S |
    47  #define t4 r26 macro
    53  #define t10 t4 // alias!
    223  EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8) // M1
    235  EK(.ex_handler, (p[D]) st8 [dst1] = t4, 3*8)
    425  EK(.ex_handler_short, (p14) ld1 t4=[src1],2)
    434  EK(.ex_handler_short, (p14) st1 [dst1]=t4,2)
    481  EX(.ex_handler_short, (p9) ld1 t4=[src1],2)
    489  EX(.ex_handler_short, (p9) st1 [dst1] = t4,2)
|
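copy_page.S and copy_page_mck.S above software-pipeline the copy through rotating register files (.rotr t1[PIPE_DEPTH]...): loads run PIPE_DEPTH iterations ahead of the stores that consume them, with predicates gating the prologue and epilogue. Below is a rough single-stream C analogue of that schedule; the PIPE_DEPTH value and function name are illustrative assumptions, and the real routines interleave several src/dst streams.

#include <stdint.h>

#define PIPE_DEPTH 4	/* assumed depth, for illustration only */

static void copy_words_pipelined(uint64_t *dst, const uint64_t *src, int words)
{
	uint64_t pipe[PIPE_DEPTH];

	for (int i = 0; i < words + PIPE_DEPTH; i++) {
		/* Store first: it consumes the value loaded PIPE_DEPTH
		 * iterations earlier, before this iteration's load reuses
		 * the slot (the role of the rotating t1[]..t4[] registers
		 * and the (p[0])/(EPI) predicates). */
		if (i >= PIPE_DEPTH)
			dst[i - PIPE_DEPTH] = pipe[i % PIPE_DEPTH];	/* st8 */
		if (i < words)
			pipe[i % PIPE_DEPTH] = src[i];			/* ld8 */
	}
}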
/arch/arm/crypto/ |
D | sha256-armv4.pl |
    44  $inp="r1"; $t4="r1";
    71  str $inp,[sp,#17*4] @ make room for $t4
    88  str $inp,[sp,#17*4] @ make room for $t4
    120  ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx
    137  @ ldr $t4,[sp,#`($i+14)%16`*4]
    140  mov $t2,$t4,ror#$sigma1[0]
    142  eor $t2,$t2,$t4,ror#$sigma1[1]
    145  eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14])
    146  ldr $t4,[sp,#`($i+9)%16`*4]
    152  add $t1,$t1,$t4 @ X[i]
    [all …]
|
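The mov/eor/ldr lines from sha256-armv4.pl above evaluate the SHA-256 message-schedule function sigma1 over a 16-word circular window kept on the stack (the [sp,#((i+n)%16)*4] addressing). The same expansion step in plain C, per FIPS 180-4; the helper names here are mine.

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned r)
{
	return (x >> r) | (x << (32 - r));
}

static inline uint32_t sha256_sigma0(uint32_t x)
{
	return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
}

static inline uint32_t sha256_sigma1(uint32_t x)	/* the ror#17/ror#19/lsr#10 above */
{
	return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
}

/* Expand one schedule word in a 16-entry circular window; i is the
 * absolute round number (conceptually >= 16). */
static void sha256_expand_w(uint32_t W[16], unsigned i)
{
	W[i % 16] += sha256_sigma1(W[(i + 14) % 16]) + W[(i + 9) % 16]
		   + sha256_sigma0(W[(i + 1) % 16]);
}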
/arch/x86/crypto/ |
D | glue_helper-asm-avx2.S |
    61  t1x, t2, t2x, t3, t3x, t4, t5) \ argument
    64  vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */\
    75  add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \
    77  add2_le128(t2, t0, t4, t3, t5); \
    79  add2_le128(t2, t0, t4, t3, t5); \
    81  add2_le128(t2, t0, t4, t3, t5); \
    83  add2_le128(t2, t0, t4, t3, t5); \
    85  add2_le128(t2, t0, t4, t3, t5); \
    87  add2_le128(t2, t0, t4, t3, t5); \
|
D | camellia-aesni-avx2-asm_64.S |
    68  #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
    73  vbroadcasti128 .Linv_shift_row, t4; \
    81  vpshufb t4, x0, x0; \
    82  vpshufb t4, x7, x7; \
    83  vpshufb t4, x3, x3; \
    84  vpshufb t4, x6, x6; \
    85  vpshufb t4, x2, x2; \
    86  vpshufb t4, x5, x5; \
    87  vpshufb t4, x1, x1; \
    88  vpshufb t4, x4, x4; \
    [all …]
|
D | poly1305-sse2-x86_64.S |
    43  #define t4 %xmm6 macro
    135  movd s3,t4
    136  punpcklqdq t4,t3
    142  movd s2,t4
    143  punpcklqdq t4,t3
    147  movdqa t1,t4
    148  punpcklqdq t2,t4
    150  paddq t4,t1
    174  movd r0,t4
    175  punpcklqdq t4,t3
    [all …]
|
D | camellia-aesni-avx-asm_64.S |
    51  #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
    56  vmovdqa .Linv_shift_row, t4; \
    62  vpshufb t4, x0, x0; \
    63  vpshufb t4, x7, x7; \
    64  vpshufb t4, x1, x1; \
    65  vpshufb t4, x4, x4; \
    66  vpshufb t4, x2, x2; \
    67  vpshufb t4, x5, x5; \
    68  vpshufb t4, x3, x3; \
    69  vpshufb t4, x6, x6; \
    [all …]
|
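The repeated add2_le128(...) calls in glue_helper-asm-avx2.S above step 128-bit little-endian CTR-mode counters two blocks at a time, propagating carry out of the low quadword into the high one with vector compares. The scalar shape of one such increment looks roughly like this (type and function names are mine, for illustration only):

#include <stdint.h>

struct le128 {
	uint64_t lo;	/* low 64 bits of the little-endian counter */
	uint64_t hi;	/* high 64 bits */
};

static inline void le128_add(struct le128 *ctr, uint64_t inc)
{
	uint64_t old_lo = ctr->lo;

	ctr->lo += inc;
	ctr->hi += (ctr->lo < old_lo);	/* propagate the carry */
}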
/arch/sparc/lib/ |
D | blockops.S |
    27  #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    30  ldd [src + offset + 0x08], t4; \
    34  std t4, [dst + offset + 0x08]; \
|
D | checksum_32.S |
    19  #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ argument
    24  ldd [buf + offset + 0x10], t4; \
    28  addxcc t4, sum, sum; \
    193  #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    197  ldd [src + off + 0x10], t4; \
    204  addxcc t4, sum, sum; \
    205  std t4, [dst + off + 0x10]; \
    215  #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    218  ldd [src + off + 0x10], t4; \
    228  st t4, [dst + off + 0x10]; \
    [all …]
|
D | memcpy.S |
    18  #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    21  ldd [%src + (offset) + 0x10], %t4; \
    27  st %t4, [%dst + (offset) + 0x10]; \
    32  #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    35  ldd [%src + (offset) + 0x10], %t4; \
    39  std %t4, [%dst + (offset) + 0x10]; \
|
D | copy_user.S |
    68  #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    71  ldd [%src + (offset) + 0x10], %t4; \
    77  st %t4, [%dst + (offset) + 0x10]; \
    82  #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
    85  ldd [%src + (offset) + 0x10], %t4; \
    89  std %t4, [%dst + (offset) + 0x10]; \
|
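The SPARC *_BIGCHUNK macros above unroll the inner loop over 32-byte chunks: ldd loads a doubleword pair, addcc/addxcc folds each word into the running ones'-complement sum through the carry flag, and st/std writes the copy. Here is a C sketch of the checksum-while-copy step; the function name is mine, and the per-add end-around carry stands in for the asm's deferred carry chain.

#include <stdint.h>

/* Copy one 32-byte chunk while accumulating a ones'-complement sum. */
static uint32_t csum_copy_chunk(uint32_t *dst, const uint32_t *src, uint32_t sum)
{
	for (int i = 0; i < 8; i++) {		/* 8 words = 32 bytes */
		uint32_t w = src[i];		/* ldd (pairwise in the asm) */
		uint64_t t = (uint64_t)sum + w;

		sum = (uint32_t)t + (uint32_t)(t >> 32);	/* end-around carry */
		dst[i] = w;			/* st/std */
	}
	return sum;
}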
/arch/mips/lib/ |
D | csum_partial.S |
    32  #define t4 $12 macro
    180  CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
    191  CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
    192  CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
    193  CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
    194  CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
    206  CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
    207  CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
    215  CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
    500  LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
    [all …]
|
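csum_partial.S above runs the same accumulation over 0x20-byte CSUM_BIGCHUNKs; what remains at the end is folding the wide sum down to the 16-bit Internet checksum. The standard fold, written here in C for illustration (the kernel has its own per-arch version of this):

#include <stdint.h>

static uint16_t fold_to_csum16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the possible carry */
	return (uint16_t)~sum;
}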
/arch/alpha/include/uapi/asm/ |
D | regdef.h | 10 #define t4 $5 macro
|
/arch/mips/include/asm/ |
D | regdef.h | 34 #define t4 $12 macro
|
/arch/arm64/include/asm/ |
D | assembler.h |
    397  .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
    399  ldp \t3, \t4, [\src, #16]
    404  stnp \t3, \t4, [\dest, #16]
|
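The arm64 copy_page assembler macro above streams a page through eight temporaries, loading with ldp and storing with stnp (non-temporal pair). A plain C rendering of one unrolled 64-byte step, minus the non-temporal store hint; the PAGE_SIZE value and function name are assumptions for illustration.

#include <stdint.h>

#define PAGE_SIZE 4096	/* assumed; arm64 also supports 16K/64K pages */

static void copy_page_c(void *dest, const void *src)
{
	uint64_t *d = dest;
	const uint64_t *s = src;

	for (unsigned i = 0; i < PAGE_SIZE / sizeof(uint64_t); i += 8) {
		/* eight loads up front, then eight stores: t1..t8 */
		uint64_t t1 = s[i],     t2 = s[i + 1];
		uint64_t t3 = s[i + 2], t4 = s[i + 3];
		uint64_t t5 = s[i + 4], t6 = s[i + 5];
		uint64_t t7 = s[i + 6], t8 = s[i + 7];

		d[i]     = t1; d[i + 1] = t2;
		d[i + 2] = t3; d[i + 3] = t4;
		d[i + 4] = t5; d[i + 5] = t6;
		d[i + 6] = t7; d[i + 7] = t8;
	}
}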