/arch/alpha/lib/

D | stxcpy.S
      73  negq t8, t6            # e0 : find low bit set
      74  and t8, t6, t12        # e1 (stall)
      78  and t12, 0x80, t6      # e0 :
      79  bne t6, 1f             # .. e1 (zdb)
      84  subq t12, 1, t6        # .. e1 :
      85  zapnot t1, t6, t1      # e0 : clear src bytes >= null
      86  or t12, t6, t8         # .. e1 :
     142  or t1, t6, t6          # e0 :
     143  cmpbge zero, t6, t8    # .. e1 :
     144  lda t6, -1             # e0 : for masking just below
     [all …]

D | stxncpy.S
      91  and t12, 0x80, t6      # e0 :
      92  bne t6, 1f             # .. e1 (zdb)
      97  subq t12, 1, t6        # .. e1 :
      98  or t12, t6, t8         # e0 :
     165  or t0, t6, t6          # e1 : mask original data for zero test
     166  cmpbge zero, t6, t8    # e0 :
     168  lda t6, -1             # e0 :
     171  mskql t6, a1, t6       # e0 : mask out bits already seen
     174  or t6, t2, t2          # .. e1 :
     243  or t8, t10, t6         # e1 :
     [all …]

D | ev6-stxcpy.S
      89  negq t8, t6            # E : find low bit set
      90  and t8, t6, t12        # E : (stall)
      93  and t12, 0x80, t6      # E : (stall)
      94  bne t6, 1f             # U : (stall)
      99  subq t12, 1, t6        # E :
     100  zapnot t1, t6, t1      # U : clear src bytes >= null (stall)
     101  or t12, t6, t8         # E : (stall)
     165  or t1, t6, t6          # E :
     166  cmpbge zero, t6, t8    # E : (stall)
     167  lda t6, -1             # E : for masking just below
     [all …]

D | ev6-stxncpy.S
     117  and t12, 0x80, t6      # E : (stall)
     118  bne t6, 1f             # U : (stall)
     123  subq t12, 1, t6        # E :
     124  or t12, t6, t8         # E : (stall)
     205  or t0, t6, t6          # E : mask original data for zero test (stall)
     207  cmpbge zero, t6, t8    # E :
     209  lda t6, -1             # E :
     213  mskql t6, a1, t6       # U : mask out bits already seen
     215  or t6, t2, t2          # E : (stall)
     288  or t8, t10, t6         # E : (stall)
     [all …]

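Note: all four string-copy files above lean on the same two Alpha bit tricks. cmpbge zero, x, y sets bit i of y exactly when byte i of x is zero, and the negq/and pair ("find low bit set") computes x & -x to isolate the lowest set bit of that mask; subq t12, 1, t6 then turns it into a mask of everything below the null. A minimal C sketch of both, assuming 64-bit unsigned arithmetic (helper names are illustrative, not from the kernel):

    #include <stdint.h>

    /* Emulate "cmpbge zero, x, mask": bit i is set iff byte i of x is zero. */
    static uint64_t zero_byte_mask(uint64_t x)
    {
        uint64_t mask = 0;
        int i;

        for (i = 0; i < 8; i++)
            if (((x >> (8 * i)) & 0xff) == 0)
                mask |= 1u << i;
        return mask;
    }

    /* Emulate "negq t8, t6; and t8, t6, t12": isolate the lowest set bit;
     * subtracting 1 from the result masks every byte below the null. */
    static uint64_t lowest_set_bit(uint64_t m)
    {
        return m & -m;
    }
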
D | strrchr.S
      23  mov zero, t6           # .. e1 : t6 is last match aligned addr
      46  cmovne t3, v0, t6      # .. e1 : save previous comparisons match
      63  cmovne t3, v0, t6      # e0 :
      80  addq t6, t0, v0        # .. e1 : add our aligned base ptr to the mix

D | ev67-strrchr.S
      40  mov zero, t6           # E : t6 is last match aligned addr
      68  cmovne t3, v0, t6      # E : save previous comparisons match
      94  cmovne t3, v0, t6      # E :
     105  addq t6, t5, v0        # E : and add to quadword address

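Note: both strrchr variants keep the address of the most recent match in t6, updating it with cmovne instead of a branch. The same "remember the last match" idiom in C, sketched one character at a time rather than one quadword (a hypothetical helper, not the kernel's code):

    #include <stddef.h>

    /* Track the last position where c matched; the kernel does this one
     * aligned quadword at a time, with cmovne in place of the branch. */
    static const char *last_match(const char *s, char c)
    {
        const char *last = NULL;

        do {
            if (*s == c)
                last = s;       /* the cmovne: update only on a match */
        } while (*s++ != '\0');

        return last;
    }
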
/arch/ia64/lib/

D | copy_page.S
      45  t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH]
      84  (p[0]) ld8 t6[0]=[src2],16
      85  (EPI) st8 [tgt2]=t6[PIPE_DEPTH-1],16

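Note: copy_page.S uses the ia64 rotating register files: t6[PIPE_DEPTH] declares a rotating buffer, (p[0]) ld8 fills stage 0, and (EPI) st8 drains stage PIPE_DEPTH-1, so every store consumes data loaded PIPE_DEPTH-1 iterations earlier. A C model of that software pipeline, assuming a depth of 4 (illustrative only; the hardware rotates the registers for free):

    #define DEPTH 4     /* stands in for PIPE_DEPTH */

    /* Software-pipelined copy: loads run DEPTH-1 iterations ahead of the
     * stores, with stage[] modeling one rotating register such as t6[]. */
    static void pipelined_copy(long *dst, const long *src, int n)
    {
        long stage[DEPTH];
        int i;

        for (i = 0; i < n + DEPTH - 1; i++) {
            if (i < n)                          /* prologue + kernel */
                stage[i % DEPTH] = src[i];
            if (i >= DEPTH - 1)                 /* kernel + epilogue */
                dst[i - (DEPTH - 1)] = stage[(i - (DEPTH - 1)) % DEPTH];
        }
    }
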
D | copy_page_mck.S
      82  #define t6 t2 // alias!
      87  #define t12 t6 // alias!
     158  (p[D]) ld8 t6 = [src0], 3*8
     165  (p[D]) st8 [dst0] = t6, 3*8

D | memcpy_mck.S
      50  #define t6 t2 // alias!
      56  #define t12 t6 // alias!
     238  EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8)
     245  EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8)
     440  EK(.ex_handler_short, (p8) ld1 t6=[src1],2)
     445  EK(.ex_handler_short, (p8) st1 [dst1]=t6,2)
     487  EX(.ex_handler_short, (p11) ld1 t6=[src1],2)
     494  EX(.ex_handler_short, (p11) st1 [dst1] = t6,2)

/arch/sparc/lib/

D | blockops.S
      28  #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
      32  ldd [src + offset + 0x00], t6; \
      36  std t6, [dst + offset + 0x00];

D | copy_user.S
      69  #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
      73  ldd [%src + (offset) + 0x18], %t6; \
      80  st %t6, [%dst + (offset) + 0x18]; \
      83  #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
      87  ldd [%src + (offset) + 0x18], %t6; \
      91  std %t6, [%dst + (offset) + 0x18];

D | memcpy.S
      19  #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
      23  ldd [%src + (offset) + 0x18], %t6; \
      30  st %t6, [%dst + (offset) + 0x18]; \
      33  #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
      37  ldd [%src + (offset) + 0x18], %t6; \
      41  std %t6, [%dst + (offset) + 0x18];

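Note: the MOVE_BIGCHUNK/MOVE_BIGALIGNCHUNK macros above unroll a 32-byte copy through the temporaries t0-t7, issuing all ldd loads before any store so the load latency is hidden. A rough C equivalent of one chunk (a sketch; ldd actually fills a register pair such as %t6/%t7):

    #include <stdint.h>

    /* One 32-byte chunk, all loads grouped ahead of all stores, as in
     * MOVE_BIGCHUNK; index 3 is byte offset 0x18, the %t6/%t7 pair. */
    static void move_bigchunk(uint64_t *dst, const uint64_t *src, long off)
    {
        uint64_t t01 = src[off + 0];
        uint64_t t23 = src[off + 1];
        uint64_t t45 = src[off + 2];
        uint64_t t67 = src[off + 3];

        dst[off + 0] = t01;
        dst[off + 1] = t23;
        dst[off + 2] = t45;
        dst[off + 3] = t67;
    }
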
D | checksum_32.S
     194  #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
     200  ldd [src + off + 0x18], t6; \
     208  std t6, [dst + off + 0x18]; \
     209  addxcc t6, sum, sum; \
     216  #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
     220  ldd [src + off + 0x18], t6; \
     233  st t6, [dst + off + 0x18]; \
     234  addxcc t6, sum, sum; \

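Note: checksum_32.S folds the copied data into an Internet (ones'-complement) checksum as it streams by; each addxcc adds a word plus the carry left by the previous add. C has no add-with-carry, so the usual portable stand-in re-adds the carry explicitly (a sketch, not the kernel's code):

    #include <stdint.h>

    /* Stand-in for the addxcc chain: add a word, then feed the carry
     * back in, as ones'-complement addition requires. */
    static uint32_t csum_add(uint32_t sum, uint32_t word)
    {
        sum += word;
        if (sum < word)         /* wrapped around: recover the carry */
            sum += 1;
        return sum;
    }
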
/arch/x86/crypto/

D | camellia-aesni-avx-asm_64.S
      51  #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
      74  filter_8bit(x0, t0, t1, t7, t6); \
      75  filter_8bit(x7, t0, t1, t7, t6); \
      76  filter_8bit(x1, t0, t1, t7, t6); \
      77  filter_8bit(x4, t0, t1, t7, t6); \
      78  filter_8bit(x2, t0, t1, t7, t6); \
      79  filter_8bit(x5, t0, t1, t7, t6); \
      83  filter_8bit(x3, t2, t3, t7, t6); \
      84  filter_8bit(x6, t2, t3, t7, t6); \
     101  filter_8bit(x0, t0, t1, t7, t6); \
     [all …]

D | camellia-aesni-avx2-asm_64.S
      68  #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
      76  vbroadcasti128 .Lpre_tf_hi_s1, t6; \
      92  filter_8bit(x0, t5, t6, t7, t4); \
      93  filter_8bit(x7, t5, t6, t7, t4); \
     100  filter_8bit(x2, t5, t6, t7, t4); \
     101  filter_8bit(x5, t5, t6, t7, t4); \
     102  filter_8bit(x1, t5, t6, t7, t4); \
     103  filter_8bit(x4, t5, t6, t7, t4); \
     108  vextracti128 $1, x2, t6##_x; \
     127  vaesenclast t4##_x, t6##_x, t6##_x; \
     [all …]

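Note: the filter_8bit() macro both Camellia files lean on is a vpshufb-driven byte substitution: each byte is split into nibbles, each nibble indexes a 16-entry table, and the two lookups are XORed. Per byte it behaves like this C sketch (table contents omitted; in the real code they encode the s-box affine transforms):

    #include <stdint.h>

    /* Scalar model of filter_8bit(): two 16-entry lookups, one per nibble,
     * combined with XOR; vpshufb performs 16 such lookups at once. */
    static uint8_t filter_8bit(uint8_t x, const uint8_t lo[16],
                               const uint8_t hi[16])
    {
        return lo[x & 0x0f] ^ hi[x >> 4];
    }
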
/arch/arm64/crypto/

D | ghash-ce-core.S
      30  t6 .req v13
      92  pmull\t t6.8h, \ad, \b2\().\nb   // G = A*B2
      99  eor t5.16b, t5.16b, t6.16b       // M = G + H
     104  uzp1 t6.2d, t7.2d, t9.2d
     114  eor t6.16b, t6.16b, t7.16b
     118  eor t6.16b, t6.16b, t7.16b
     122  zip2 t9.2d, t6.2d, t7.2d
     123  zip1 t7.2d, t6.2d, t7.2d

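Note: ghash-ce-core.S assembles a 128-bit GHASH multiplication from pmull, a carryless (polynomial, GF(2)) multiply; partial products are combined with eor because addition in GF(2) is XOR, hence comments like "M = G + H". A scalar sketch of carryless multiplication, 8x8 -> 16 bits (illustrative only):

    #include <stdint.h>

    /* Carryless multiply: schoolbook multiplication with the partial
     * products XORed together instead of added (no carries). */
    static uint16_t clmul8(uint8_t a, uint8_t b)
    {
        uint16_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
            if (b & (1u << i))
                r ^= (uint16_t)a << i;
        return r;
    }
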
D | aes-neonbs-core.S
     253  t0, t1, t2, t3, t4, t5, t6, t7, inv
     265  ext \t6\().16b, \x6\().16b, \x6\().16b, #12
     268  eor \x6\().16b, \x6\().16b, \t6\().16b
     278  eor \t6\().16b, \t6\().16b, \x5\().16b
     296  eor \x3\().16b, \x3\().16b, \t6\().16b
     301  eor \x2\().16b, \x3\().16b, \t6\().16b
     309  t0, t1, t2, t3, t4, t5, t6, t7
     311  ext \t6\().16b, \x6\().16b, \x6\().16b, #8
     315  eor \t6\().16b, \t6\().16b, \x6\().16b
     326  eor \x0\().16b, \x0\().16b, \t6\().16b
     [all …]

/arch/alpha/include/uapi/asm/

D | regdef.h
      13  #define t6 $7

/arch/arm/crypto/

D | aes-neonbs-core.S
     305  t0, t1, t2, t3, t4, t5, t6, t7, inv
     317  vext.8 \t6, \x6, \x6, #12
     320  veor \x6, \x6, \t6
     330  veor \t6, \t6, \x5
     348  veor \x3, \x3, \t6
     353  veor \x2, \x3, \t6
     361  t0, t1, t2, t3, t4, t5, t6, t7
     368  vld1.8 {\t6-\t7}, [bskey, :256]
     373  veor \x6, \x6, \t6
     376  vext.8 \t6, \x6, \x6, #8
     [all …]

/arch/mips/include/asm/

D | regdef.h
      38  #define t6 $14

/arch/mips/kernel/

D | scall32-o32.S
      68  load_a5: user_lw(t6, 20(t0))   # argument #6 from usp
      74  sw t6, 20(sp)                  # argument #6 to ksp
     164  li t6, 0
     200  lw t6, 28(sp)
     203  sw t6, 24(sp)

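Note: in scall32-o32.S, t6 shuttles syscall argument #6: the o32 ABI passes arguments 5-8 on the user stack, so the kernel fetches them with a fault-tolerant user_lw and re-stores them on the kernel stack for the C handler. A userland model of that one step (a loose sketch; the real code must also survive a bad user pointer):

    #include <stdint.h>

    /* Argument #6 sits 20 bytes (5 words) into the o32 user stack and is
     * copied to the same slot on the kernel stack, as lines 68/74 do. */
    static void copy_arg6(uint32_t *ksp, const uint32_t *usp)
    {
        ksp[5] = usp[5];
    }
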
D | pm-cps.c
      77  t0, t1, t2, t3, t4, t5, t6, t7,

/arch/tile/kernel/

D | hvglue_trace.c
     159  #define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__)
     168  #define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__)

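Note: __HV_DECL6/__HV_PASS6 belong to a pair-peeling macro chain: each level consumes one (type, name) pair from __VA_ARGS__ and recurses into the next-smaller macro, producing either a parameter declaration list or a bare argument list. A minimal three-level sketch of the same idiom (hypothetical names):

    /* Declaration builders: each level peels one (type, name) pair. */
    #define DECL1(t1, a1)      t1 a1
    #define DECL2(t2, a2, ...) t2 a2, DECL1(__VA_ARGS__)
    #define DECL3(t3, a3, ...) t3 a3, DECL2(__VA_ARGS__)

    /* Argument builders: same shape, but the types are dropped. */
    #define PASS1(t1, a1)      a1
    #define PASS2(t2, a2, ...) a2, PASS1(__VA_ARGS__)
    #define PASS3(t3, a3, ...) a3, PASS2(__VA_ARGS__)

    /* DECL3(int, x, long, y, char, z) -> int x, long y, char z
     * PASS3(int, x, long, y, char, z) -> x, y, z                */
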
/arch/arm64/include/asm/

D | assembler.h
     424  .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
     427  ldp \t5, \t6, [\src, #32]
     432  stnp \t5, \t6, [\dest, #32]

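Note: the copy_page macro moves a page 64 bytes per iteration through eight temporaries, loading with ldp (load pair) and storing with stnp (non-temporal store pair) so the freshly written page does not evict hot cache lines. The same unrolling in C, minus the non-temporal hint that plain C cannot express (assuming 4 KiB pages):

    #include <stdint.h>

    #define PAGE_SIZE 4096      /* assumed 4 KiB pages */

    /* 64 bytes per iteration through eight temporaries, as t1..t8 above. */
    static void copy_page_sketch(uint64_t *dst, const uint64_t *src)
    {
        int i;

        for (i = 0; i < PAGE_SIZE / 8; i += 8) {
            uint64_t t1 = src[i + 0], t2 = src[i + 1];
            uint64_t t3 = src[i + 2], t4 = src[i + 3];
            uint64_t t5 = src[i + 4], t6 = src[i + 5];  /* the pair at #32 */
            uint64_t t7 = src[i + 6], t8 = src[i + 7];

            dst[i + 0] = t1; dst[i + 1] = t2;
            dst[i + 2] = t3; dst[i + 3] = t4;
            dst[i + 4] = t5; dst[i + 5] = t6;
            dst[i + 6] = t7; dst[i + 7] = t8;
        }
    }
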
/arch/mips/lib/

D | csum_partial.S
      35  #define t6 $14
     505  LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
     521  STORE(t6, UNIT(6)(dst), .Ls_exc\@)
     522  ADDC(t6, t7)
     524  ADDC(sum, t6)

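Note: csum_partial.S interleaves the copy (LOAD/STORE with exception fixups) with the same carry-propagating accumulation seen in the sparc checksum above, via its ADDC macro. Once the loop ends, the 32-bit running sum is folded to the final 16-bit checksum with end-around carry, conventionally:

    #include <stdint.h>

    /* Fold a 32-bit ones'-complement sum down to 16 bits. */
    static uint16_t csum_fold_sketch(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);  /* absorb any fresh carry */
        return (uint16_t)~sum;
    }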