| /kernel/linux/linux-6.6/arch/riscv/lib/ |
| D | memcpy.S |
    12   move t6, a0 /* Preserve return value */
    18   andi a3, t6, SZREG-1
    34   sb a5, 0(t6)
    35   addi t6, t6, 1
    54   REG_S a4, 0(t6)
    55   REG_S a5, SZREG(t6)
    56   REG_S a6, 2*SZREG(t6)
    57   REG_S a7, 3*SZREG(t6)
    58   REG_S t0, 4*SZREG(t6)
    59   REG_S t1, 5*SZREG(t6)
    [all …]
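The memcpy.S hits above show the two jobs t6 has in the RISC-V copy routine: it keeps a private copy of the destination so a0 can be returned untouched, and it walks the aligned middle of the buffer where stores go out SZREG bytes at a time in an unrolled run of REG_S. A minimal C sketch of that overall shape follows; the function name, the 8-way unroll factor, and the use of library `memcpy` for the word chunk are illustrative assumptions, not the kernel's code.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch only: byte-copy the head until the destination is word aligned,
 * move the bulk one machine word at a time (several words per iteration,
 * like the unrolled REG_S block), then finish with a byte tail. The
 * original destination is kept aside so it can be returned, which is
 * what `move t6, a0` does in the assembly. */
void *sketch_memcpy(void *dest, const void *src, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;
    const size_t word = sizeof(long);
    size_t i = 0;

    /* Head: bytes until dest is word aligned (cf. `andi a3, t6, SZREG-1`). */
    while (i < n && ((uintptr_t)(d + i) & (word - 1)) != 0) {
        d[i] = s[i];
        i++;
    }

    /* Bulk: eight words per iteration when the source is co-aligned. */
    if (((uintptr_t)(s + i) & (word - 1)) == 0) {
        while (n - i >= 8 * word) {
            memcpy(d + i, s + i, 8 * word); /* lowered to word loads/stores */
            i += 8 * word;
        }
    }

    /* Tail: whatever bytes remain. */
    for (; i < n; i++)
        d[i] = s[i];

    return dest;    /* the preserved pointer, like a0 kept safe in t6 */
}
```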
|
| D | uaccess.S |
    17   li t6, SR_SUM
    18   csrs CSR_STATUS, t6
    174  csrc CSR_STATUS, t6
    181  csrc CSR_STATUS, t6
    193  li t6, SR_SUM
    194  csrs CSR_STATUS, t6
    216  csrc CSR_STATUS, t6
    233  csrc CSR_STATUS, t6
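These uaccess.S hits are the SUM (permit Supervisor access to User Memory) bracket: SR_SUM is set in the status CSR right before the kernel touches user pages and cleared again on every exit path, including the fault fixups. Below is a hedged C rendering of just that bracket; the helper names are made up, the bit value is written out explicitly, and the inline asm targets sstatus directly, so it compiles only for RISC-V targets.

```c
/* Sketch of the SR_SUM bracket around user-memory access on RISC-V.
 * The real code does this in assembly and pairs it with exception-table
 * fixup entries so a faulting access still reaches the csrc path. */
#define SR_SUM_BIT (1UL << 18)  /* sstatus.SUM: S-mode may touch U pages */

static inline void user_access_begin_sum(void)
{
    unsigned long bit = SR_SUM_BIT;
    __asm__ volatile ("csrs sstatus, %0" : : "r" (bit) : "memory"); /* cf. csrs CSR_STATUS, t6 */
}

static inline void user_access_end_sum(void)
{
    unsigned long bit = SR_SUM_BIT;
    __asm__ volatile ("csrc sstatus, %0" : : "r" (bit) : "memory"); /* cf. csrc CSR_STATUS, t6 */
}
```

The actual copy loop sits between the two calls, keeping the window in which user mappings are reachable from kernel code as short as possible.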
|
| D | memmove.S |
    39   * Both Copy Modes: t6 - Non-Inclusive last multibyte/aligned of dest
    51   * Byte copy does not need t5 or t6.
    66   * Now solve for t5 and t6.
    69   andi t6, t4, -SZREG
    103  addi a2, t6, SZREG /* The other breakpoint for the unrolled loop*/
    151  bne t3, t6, 1b
    153  mv t3, t6 /* Fix the dest pointer in case the loop was broken */
    236  bne t3, t6, 1b
    273  beq t4, t6, 2f
    279  bne t4, t6, 1b
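In memmove.S, t6 is the non-inclusive aligned end of the destination (`andi t6, t4, -SZREG`), one of the "breakpoints" that bound the unrolled word loop in either copy direction. The sketch below shows only the direction choice that the surrounding code serves; it copies byte by byte and merely notes where the aligned breakpoints would matter. The function name is illustrative.

```c
#include <stddef.h>

/* If the destination overlaps the tail of the source, copy backward so
 * no source byte is overwritten before it is read; otherwise forward.
 * The kernel routine additionally computes aligned breakpoints of the
 * destination (t5 and t6 in its comments) so the middle of either
 * direction can run as an unrolled word-sized loop. Sketch only. */
void *sketch_memmove(void *dest, const void *src, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;

    if (d <= s || d >= s + n) {
        for (size_t i = 0; i < n; i++)
            d[i] = s[i];
    } else {
        for (size_t i = n; i-- > 0; )
            d[i] = s[i];
    }
    return dest;
}
```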
|
| /kernel/linux/linux-5.10/arch/riscv/lib/ |
| D | memcpy.S |
    12   move t6, a0 /* Preserve return value */
    18   andi a3, t6, SZREG-1
    34   sb a5, 0(t6)
    35   addi t6, t6, 1
    54   REG_S a4, 0(t6)
    55   REG_S a5, SZREG(t6)
    56   REG_S a6, 2*SZREG(t6)
    57   REG_S a7, 3*SZREG(t6)
    58   REG_S t0, 4*SZREG(t6)
    59   REG_S t1, 5*SZREG(t6)
    [all …]
|
| D | uaccess.S |
    19   li t6, SR_SUM
    20   csrs CSR_STATUS, t6
    49   csrc CSR_STATUS, t6
    75   li t6, SR_SUM
    76   csrs CSR_STATUS, t6
    98   csrc CSR_STATUS, t6
    119  csrs CSR_STATUS, t6
    123  csrs CSR_STATUS, t6
|
| /kernel/linux/linux-5.10/arch/alpha/lib/ |
| D | stxncpy.S |
    91   and t12, 0x80, t6 # e0 :
    92   bne t6, 1f # .. e1 (zdb)
    97   subq t12, 1, t6 # .. e1 :
    98   or t12, t6, t8 # e0 :
    156  t6 == bytemask that is -1 in dest word bytes */
    165  or t0, t6, t6 # e1 : mask original data for zero test
    166  cmpbge zero, t6, t8 # e0 :
    168  lda t6, -1 # e0 :
    171  mskql t6, a1, t6 # e0 : mask out bits already seen
    174  or t6, t2, t2 # .. e1 :
    [all …]
|
| D | stxcpy.S |
    73   negq t8, t6 # e0 : find low bit set
    74   and t8, t6, t12 # e1 (stall)
    78   and t12, 0x80, t6 # e0 :
    79   bne t6, 1f # .. e1 (zdb)
    84   subq t12, 1, t6 # .. e1 :
    85   zapnot t1, t6, t1 # e0 : clear src bytes >= null
    86   or t12, t6, t8 # .. e1 :
    130  t6 == bytemask that is -1 in dest word bytes */
    142  or t1, t6, t6 # e0 :
    143  cmpbge zero, t6, t8 # .. e1 :
    [all …]
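In the stxncpy.S/stxcpy.S hits above, `cmpbge zero, t6, t8` turns a whole quadword into a per-byte "is this byte zero?" mask, which is how the Alpha string copies locate the terminator eight bytes at a time. A portable C stand-in is the classic bit trick below; unlike cmpbge it only reliably identifies the first zero byte (lowest set bit), which is all these routines need. The function name and test values are illustrative, not kernel code.

```c
#include <stdint.h>
#include <stdio.h>

/* Nonzero iff some byte of v is 0x00; the least significant set bit sits
 * in the first such byte. Bytes above a zero byte can carry spurious
 * bits, so this is not an exact per-byte mask like Alpha's cmpbge, but
 * finding the first terminator is enough for a string copy. */
static uint64_t has_zero_byte(uint64_t v)
{
    return (v - 0x0101010101010101ULL) & ~v & 0x8080808080808080ULL;
}

int main(void)
{
    uint64_t no_nul   = 0x4142434445464748ULL;  /* "ABCDEFGH", no zero byte */
    uint64_t with_nul = 0x4142434400464748ULL;  /* one zero byte in the middle */

    printf("%d %d\n", has_zero_byte(no_nul) != 0, has_zero_byte(with_nul) != 0);
    return 0;
}
```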
|
| D | ev6-stxncpy.S |
    117  and t12, 0x80, t6 # E : (stall)
    118  bne t6, 1f # U : (stall)
    123  subq t12, 1, t6 # E :
    124  or t12, t6, t8 # E : (stall)
    195  t6 == bytemask that is -1 in dest word bytes */
    205  or t0, t6, t6 # E : mask original data for zero test (stall)
    207  cmpbge zero, t6, t8 # E :
    209  lda t6, -1 # E :
    213  mskql t6, a1, t6 # U : mask out bits already seen
    215  or t6, t2, t2 # E : (stall)
    [all …]
|
| D | ev6-stxcpy.S |
    89   negq t8, t6 # E : find low bit set
    90   and t8, t6, t12 # E : (stall)
    93   and t12, 0x80, t6 # E : (stall)
    94   bne t6, 1f # U : (stall)
    99   subq t12, 1, t6 # E :
    100  zapnot t1, t6, t1 # U : clear src bytes >= null (stall)
    101  or t12, t6, t8 # E : (stall)
    153  t6 == bytemask that is -1 in dest word bytes */
    165  or t1, t6, t6 # E :
    166  cmpbge zero, t6, t8 # E : (stall)
    [all …]
|
| D | strrchr.S |
    23   mov zero, t6 # .. e1 : t6 is last match aligned addr
    46   cmovne t3, v0, t6 # .. e1 : save previous comparisons match
    63   cmovne t3, v0, t6 # e0 :
    80   addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix
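strrchr.S (and its EV67 variant) keeps the address of the last quadword that contained a match in t6, updating it with cmovne so saving the newest hit never branches, and only converts it into the final pointer at the end of the scan. The same "remember the most recent occurrence, return it after the scan" structure looks like this in C, byte at a time and without the conditional move; the name is illustrative, not the kernel's implementation.

```c
#include <stddef.h>

/* Remember the latest position where c occurred and return it once the
 * terminator is reached; NULL if c never occurs. The assembly does the
 * same a quadword per iteration, with t6 holding the last matching
 * aligned address. */
char *last_occurrence(const char *s, int c)
{
    const char *last = NULL;

    do {
        if (*s == (char)c)
            last = s;
    } while (*s++ != '\0');

    return (char *)last;
}
```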
|
| D | ev67-strrchr.S |
    40   mov zero, t6 # E : t6 is last match aligned addr
    68   cmovne t3, v0, t6 # E : save previous comparisons match
    94   cmovne t3, v0, t6 # E :
    105  addq t6, t5, v0 # E : and add to quadword address
|
| /kernel/linux/linux-6.6/arch/alpha/lib/ |
| D | stxncpy.S |
    91   and t12, 0x80, t6 # e0 :
    92   bne t6, 1f # .. e1 (zdb)
    97   subq t12, 1, t6 # .. e1 :
    98   or t12, t6, t8 # e0 :
    156  t6 == bytemask that is -1 in dest word bytes */
    165  or t0, t6, t6 # e1 : mask original data for zero test
    166  cmpbge zero, t6, t8 # e0 :
    168  lda t6, -1 # e0 :
    171  mskql t6, a1, t6 # e0 : mask out bits already seen
    174  or t6, t2, t2 # .. e1 :
    [all …]
|
| D | stxcpy.S |
    73   negq t8, t6 # e0 : find low bit set
    74   and t8, t6, t12 # e1 (stall)
    78   and t12, 0x80, t6 # e0 :
    79   bne t6, 1f # .. e1 (zdb)
    84   subq t12, 1, t6 # .. e1 :
    85   zapnot t1, t6, t1 # e0 : clear src bytes >= null
    86   or t12, t6, t8 # .. e1 :
    130  t6 == bytemask that is -1 in dest word bytes */
    142  or t1, t6, t6 # e0 :
    143  cmpbge zero, t6, t8 # .. e1 :
    [all …]
|
| D | ev6-stxncpy.S |
    117  and t12, 0x80, t6 # E : (stall)
    118  bne t6, 1f # U : (stall)
    123  subq t12, 1, t6 # E :
    124  or t12, t6, t8 # E : (stall)
    195  t6 == bytemask that is -1 in dest word bytes */
    205  or t0, t6, t6 # E : mask original data for zero test (stall)
    207  cmpbge zero, t6, t8 # E :
    209  lda t6, -1 # E :
    213  mskql t6, a1, t6 # U : mask out bits already seen
    215  or t6, t2, t2 # E : (stall)
    [all …]
|
| D | ev6-stxcpy.S |
    89   negq t8, t6 # E : find low bit set
    90   and t8, t6, t12 # E : (stall)
    93   and t12, 0x80, t6 # E : (stall)
    94   bne t6, 1f # U : (stall)
    99   subq t12, 1, t6 # E :
    100  zapnot t1, t6, t1 # U : clear src bytes >= null (stall)
    101  or t12, t6, t8 # E : (stall)
    153  t6 == bytemask that is -1 in dest word bytes */
    165  or t1, t6, t6 # E :
    166  cmpbge zero, t6, t8 # E : (stall)
    [all …]
|
| D | strrchr.S |
    23   mov zero, t6 # .. e1 : t6 is last match aligned addr
    46   cmovne t3, v0, t6 # .. e1 : save previous comparisons match
    63   cmovne t3, v0, t6 # e0 :
    80   addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix
|
| D | ev67-strrchr.S |
    40   mov zero, t6 # E : t6 is last match aligned addr
    68   cmovne t3, v0, t6 # E : save previous comparisons match
    94   cmovne t3, v0, t6 # E :
    105  addq t6, t5, v0 # E : and add to quadword address
|
| /kernel/linux/linux-5.10/arch/x86/crypto/ |
| D | aesni-intel_avx-x86_64.S |
    605  .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
    621  \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
    667  \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
    941  .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6 [argument]
    950  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
    956  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
    962  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
    968  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
    974  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
    980  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
    [all …]
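PRECOMPUTE_AVX builds a table of powers of the GHASH hash key (HashKey^2, HashKey^3, …, each pre-scaled "<<1 mod poly") by repeatedly multiplying in GF(2^128), so the main GCM loop can fold several blocks per iteration; T6 is just one of the scratch registers the multiply macro consumes. The sketch below shows only the "repeated multiplication" idea with a plain schoolbook GF(2^128) multiply in the non-reflected representation; it deliberately ignores GHASH's bit-reflected convention and the <<1 pre-scaling, and the type names, helpers, and key value are all made up for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* 128-bit field element, hi = most significant 64 bits. */
typedef struct { uint64_t hi, lo; } u128;

/* Multiply by x in GF(2^128) with reduction polynomial
 * x^128 + x^7 + x^2 + x + 1 (low-order constant 0x87). */
static u128 gf128_mul_x(u128 a)
{
    uint64_t carry = a.hi >> 63;
    u128 r;

    r.hi = (a.hi << 1) | (a.lo >> 63);
    r.lo = (a.lo << 1) ^ (carry ? 0x87 : 0);
    return r;
}

/* Schoolbook shift-and-add multiply: scan b from its top bit down,
 * doubling the accumulator and adding a where a bit is set. */
static u128 gf128_mul(u128 a, u128 b)
{
    u128 r = { 0, 0 };

    for (int i = 127; i >= 0; i--) {
        uint64_t bit = (i >= 64) ? (b.hi >> (i - 64)) & 1 : (b.lo >> i) & 1;

        r = gf128_mul_x(r);
        if (bit) {
            r.hi ^= a.hi;
            r.lo ^= a.lo;
        }
    }
    return r;
}

int main(void)
{
    u128 h = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL }; /* made-up key */
    u128 hpow[8];

    hpow[0] = h;
    for (int i = 1; i < 8; i++)             /* H^2 .. H^8, the kind of table PRECOMPUTE_AVX fills */
        hpow[i] = gf128_mul(hpow[i - 1], h);

    for (int i = 0; i < 8; i++)
        printf("H^%d = %016llx%016llx\n", i + 1,
               (unsigned long long)hpow[i].hi, (unsigned long long)hpow[i].lo);
    return 0;
}
```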
|
| /kernel/linux/linux-6.6/arch/x86/crypto/ |
| D | aesni-intel_avx-x86_64.S |
    571  .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
    587  \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
    635  \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
    909  .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6 [argument]
    918  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
    924  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
    930  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
    936  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
    942  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
    948  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
    [all …]
|
| D | aria-aesni-avx2-asm_64.S |
    309  t4, t5, t6, t7) \ [argument]
    332  t4, t5, t6, t7) \ [argument]
    334  vpxor t6, t6, t6; \
    342  vextracti128 $1, x0, t6##_x; \
    344  vaesenclast t7##_x, t6##_x, t6##_x; \
    345  vinserti128 $1, t6##_x, x0, x0; \
    347  vextracti128 $1, x4, t6##_x; \
    349  vaesenclast t7##_x, t6##_x, t6##_x; \
    350  vinserti128 $1, t6##_x, x4, x4; \
    352  vextracti128 $1, x1, t6##_x; \
    [all …]
|
| D | camellia-aesni-avx-asm_64.S |
    50   #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ [argument]
    73   filter_8bit(x0, t0, t1, t7, t6); \
    74   filter_8bit(x7, t0, t1, t7, t6); \
    75   filter_8bit(x1, t0, t1, t7, t6); \
    76   filter_8bit(x4, t0, t1, t7, t6); \
    77   filter_8bit(x2, t0, t1, t7, t6); \
    78   filter_8bit(x5, t0, t1, t7, t6); \
    82   filter_8bit(x3, t2, t3, t7, t6); \
    83   filter_8bit(x6, t2, t3, t7, t6); \
    100  filter_8bit(x0, t0, t1, t7, t6); \
    [all …]
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/ |
| D | Kconfig |
    70   tristate "Chelsio Communications T4/T5/T6 Ethernet support"
    76   This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
    77   adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
    92   bool "Data Center Bridging (DCB) Support for Chelsio T4/T5/T6 cards"
    114  tristate "Chelsio Communications T4/T5/T6 Virtual Function Ethernet support"
    117  This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
    118  adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/ |
| D | Kconfig |
    70   tristate "Chelsio Communications T4/T5/T6 Ethernet support"
    77   This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
    78   adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
    93   bool "Data Center Bridging (DCB) Support for Chelsio T4/T5/T6 cards"
    115  tristate "Chelsio Communications T4/T5/T6 Virtual Function Ethernet support"
    118  This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
    119  adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
|
| /kernel/linux/linux-6.6/arch/arm64/crypto/ |
| D | crct10dif-ce-core.S |
    86   t6 .req v20
    138  ext t6.8b, ad.8b, ad.8b, #3 // A3
    144  pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B
    152  tbl t6.16b, {ad.16b}, perm3.16b // A3
    158  pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B
    164  eor t6.16b, t6.16b, t9.16b // N = I + J
    168  uzp1 t7.2d, t6.2d, t3.2d
    169  uzp2 t6.2d, t6.2d, t3.2d
    176  // t6 = (N) (P4 + P5) << 24
    178  eor t7.16b, t7.16b, t6.16b
    [all …]
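crct10dif-ce-core.S computes CRC16/T10-DIF by folding 16-byte blocks with pmull carry-less multiplies; in these hits t6 (aliased to v20) carries a shifted copy of the input and then the partial product J = A3*B of the 8-bit polynomial-multiply fallback. The bit-serial loop below defines the same CRC (polynomial 0x8BB7, initial value 0, no bit reflection) and is handy as a reference against any folded implementation; the function name is illustrative, and this is a sketch rather than the kernel's generic fallback.

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Bit-serial reference for the CRC the NEON/pmull code accelerates:
 * CRC16 over polynomial 0x8BB7, init 0, no reflection. The assembly
 * reaches the same value by folding 16 bytes per step with carry-less
 * multiplies; this loop is just the definition. */
static uint16_t crc_t10dif_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        crc ^= (uint16_t)buf[i] << 8;
        for (int b = 0; b < 8; b++)
            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
                                 : (uint16_t)(crc << 1);
    }
    return crc;
}

int main(void)
{
    const char *msg = "123456789";

    printf("crc = 0x%04x\n", crc_t10dif_bitwise(0, (const uint8_t *)msg, strlen(msg)));
    return 0;
}
```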
|
| /kernel/linux/linux-5.10/arch/arm64/crypto/ |
| D | crct10dif-ce-core.S |
    86   t6 .req v20
    138  ext t6.8b, ad.8b, ad.8b, #3 // A3
    144  pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B
    152  tbl t6.16b, {ad.16b}, perm3.16b // A3
    158  pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B
    164  eor t6.16b, t6.16b, t9.16b // N = I + J
    168  uzp1 t7.2d, t6.2d, t3.2d
    169  uzp2 t6.2d, t6.2d, t3.2d
    176  // t6 = (N) (P4 + P5) << 24
    178  eor t7.16b, t7.16b, t6.16b
    [all …]
|