/arch/x86/crypto/
sha256-ssse3-asm.S
  108  y2 = %r15d                     (definition)
  157  mov  f, y2                     # y2 = f
  161  xor  g, y2                     # y2 = f^g
  164  and  e, y2                     # y2 = (f^g)&e
  170  xor  g, y2                     # y2 = CH = ((f^g)&e)^g
  173  add  y0, y2                    # y2 = S1 + CH
  174  add  _XFER(%rsp), y2           # y2 = k + w + S1 + CH
  177  add  y2, h                     # h = h + S1 + CH + k + w
  178  mov  a, y2                     # y2 = a
  182  and  c, y2                     # y2 = a&c
  [all …]

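The register comments above (and in the SHA-256/SHA-512 entries that follow) spell out a common Ch() shortcut: rather than computing Ch(e,f,g) = (e & f) ^ (~e & g) directly, the code evaluates the equivalent ((f ^ g) & e) ^ g, which avoids the complement and needs only the single temporary y2. A minimal C sketch of the same identity (the helper name is illustrative, not the kernel's):

    #include <stdint.h>

    /* Ch(e,f,g) = (e & f) ^ (~e & g), rewritten as ((f ^ g) & e) ^ g.
     * Where a bit of e is set the result takes the bit of f, otherwise
     * the bit of g -- exactly what the mov/xor/and/xor sequence above
     * builds up in y2, with no NOT and no extra register. */
    static inline uint32_t sha2_ch(uint32_t e, uint32_t f, uint32_t g)
    {
        return ((f ^ g) & e) ^ g;
    }
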
sha256-avx-asm.S
  116  y2 = %r15d                     (definition)
  164  mov  f, y2                     # y2 = f
  167  xor  g, y2                     # y2 = f^g
  170  and  e, y2                     # y2 = (f^g)&e
  176  xor  g, y2                     # y2 = CH = ((f^g)&e)^g
  178  add  y0, y2                    # y2 = S1 + CH
  179  add  _XFER(%rsp), y2           # y2 = k + w + S1 + CH
  181  add  y2, h                     # h = h + S1 + CH + k + w
  182  mov  a, y2                     # y2 = a
  186  and  c, y2                     # y2 = a&c
  [all …]

sha512-avx2-asm.S
   98  y2 = %r15                      (definition)
  191  mov  f, y2                     # y2 = f                                    # CH
  195  xor  g, y2                     # y2 = f^g                                  # CH
  198  and  e, y2                     # y2 = (f^g)&e                              # CH
  207  xor  g, y2                     # y2 = CH = ((f^g)&e)^g                     # CH
  212  add  y0, y2                    # y2 = S1 + CH                              # --
  216  add  y2, d                     # d = k + w + h + d + S1 + CH = d + t1      # --
  218  add  y2, h                     # h = k + w + h + S0 + S1 + CH = t1 + S0    # --
  255  mov  f, y2                     # y2 = f                                    # CH
  258  xor  g, y2                     # y2 = f^g                                  # CH
  [all …]

sha256-avx2-asm.S
  113  y2 = %r15d                     (definition)
  166  mov  f, y2                     # y2 = f                                    # CH
  170  xor  g, y2                     # y2 = f^g                                  # CH
  174  and  e, y2                     # y2 = (f^g)&e                              # CH
  184  xor  g, y2                     # y2 = CH = ((f^g)&e)^g                     # CH
  190  add  y0, y2                    # y2 = S1 + CH                              # --
  195  add  y2, d                     # d = k + w + h + d + S1 + CH = d + t1      # --
  199  add  y2, h                     # h = k + w + h + S0 + S1 + CH = t1 + S0    # --
  216  mov  f, y2                     # y2 = f                                    # CH
  219  xor  g, y2                     # y2 = f^g                                  # CH
  [all …]

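In the two AVX2 variants above, the comments also record how the round's two updates are accumulated through y2: t1 = h + S1 + Ch + k + w is added into d, and the new h is rebuilt as t1 + S0 plus the majority function Maj(a,b,c), whose computation shows up in the SSSE3/AVX listings as "mov a, y2" / "and c, y2". A hedged C sketch of one SHA-256 round in that order, with the working variables rotated explicitly rather than renamed per round as the assembly does, and ignoring the interleaved message-schedule work:

    #include <stdint.h>

    static inline uint32_t rotr32(uint32_t x, unsigned int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* One SHA-256 round, following the accumulation order in the comments:
     * t1 = h + S1 + CH + k + w, then d += t1, then h = t1 + S0 + Maj. */
    static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t s1  = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
        uint32_t ch  = ((f ^ g) & e) ^ g;
        uint32_t t1  = h + s1 + ch + k + w;       /* "k + w + h + S1 + CH" */
        uint32_t s0  = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
        uint32_t maj = (a & b) ^ (a & c) ^ (b & c);

        s[7] = g;  s[6] = f;  s[5] = e;
        s[4] = d + t1;                            /* "d = d + t1" */
        s[3] = c;  s[2] = b;  s[1] = a;
        s[0] = t1 + s0 + maj;                     /* "h = t1 + S0" plus Maj */
    }

The SHA-512 variant has the same structure with 64-bit words (y2 is %r15 rather than %r15d) and different rotation amounts.
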
camellia-aesni-avx-asm_64.S
  213  #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \    (macro argument)
  245  #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     (macro argument)
  247      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  249      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  251      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  254  #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     (macro argument)
  256      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  258      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  260      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  483  #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     (macro argument)
  [all …]

poly1305-avx2-x86_64.S
   44  #define y2 0x30(%r8)                                              (macro)
  118  # combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
  119  vmovd y2,ruwy2x
  259  # t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
  282  # t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
  306  # t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]

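The "s2 = r2*5" style precomputation noted at line 118 is the standard Poly1305 reduction shortcut: with the accumulator and key split into 26-bit limbs and p = 2^130 - 5, any partial product that spills past limb 4 wraps around multiplied by 5, so keeping premultiplied 5*r limbs folds the reduction into the schoolbook multiply. A rough scalar sketch of that idea with hypothetical names (the AVX2 code does four of these limb products at a time across vector lanes, one per hc element):

    #include <stdint.h>

    /* Sketch of h = (h * r) mod (2^130 - 5) over five 26-bit limbs.
     * Because 2^130 mod p = 5, a product limb at position 5 or above
     * wraps around times 5 -- hence the precomputed s[] = 5 * r[]. */
    static void poly1305_mul_sketch(uint32_t h[5], const uint32_t r[5])
    {
        uint64_t d[5] = { 0 };
        uint32_t s[5];
        uint64_t c;
        int i, j;

        for (j = 0; j < 5; j++)
            s[j] = r[j] * 5;                  /* the "s2 = r2*5" trick */

        for (i = 0; i < 5; i++)
            for (j = 0; j < 5; j++) {
                if (i + j < 5)
                    d[i + j] += (uint64_t)h[i] * r[j];
                else
                    d[i + j - 5] += (uint64_t)h[i] * s[j];
            }

        for (c = 0, i = 0; i < 5; i++) {      /* lazy carry pass */
            d[i] += c;
            h[i] = d[i] & 0x3ffffff;
            c = d[i] >> 26;
        }
        h[0] += (uint32_t)c * 5;              /* wrap the leftover carry */
    }
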
camellia-aesni-avx2-asm_64.S
  247  #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \    (macro argument)
  279  #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     (macro argument)
  281      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  283      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  285      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  288  #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     (macro argument)
  290      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  292      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  294      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  517  #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     (macro argument)
  [all …]

twofish-avx-x86_64-asm_64.S
  120  #define round_head_2(a, b, x1, y1, x2, y2) \       (macro argument)
  140      G(RGI3, RGI4, y2, s1, s2, s3, s0); \
  141      vmovq RGS2, y2; \
  142      vpinsrq $1, RGS3, y2, y2;

/arch/sparc/include/asm/
sfp-machine_32.h
   78  #define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \                                      (macro argument)
   86      "rI" ((USItype)(y2)), \
   93  #define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \                                      (macro argument)
  101      "rI" ((USItype)(y2)), \
  108  #define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \                             (macro argument)
  123      "rI" ((USItype)(y2)), \
  133  #define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \                             (macro argument)
  148      "rI" ((USItype)(y2)), \
  158  #define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)     (macro argument)
  160  #define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y…   (macro argument)

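These macros implement the multi-limb fraction arithmetic used by the sparc soft-float code: __FP_FRAC_ADD_3 adds two 3-limb (96-bit) fractions in inline assembly, chaining the carry between limbs through the condition codes, and the "rI" constraints visible above allow each y limb to be a register or an immediate. A portable C sketch of what the 3-limb add computes (the helper name is illustrative):

    #include <stdint.h>

    /* (r2,r1,r0) = (x2,x1,x0) + (y2,y1,y0), limb 0 least significant.
     * The sparc inline-asm version gets the carry propagation from the
     * hardware carry flag instead of the explicit 64-bit widening used
     * here. */
    static inline void fp_frac_add_3(uint32_t r[3],
                                     const uint32_t x[3], const uint32_t y[3])
    {
        uint64_t carry = 0;
        int i;

        for (i = 0; i < 3; i++) {
            uint64_t sum = (uint64_t)x[i] + y[i] + carry;
            r[i] = (uint32_t)sum;
            carry = sum >> 32;
        }
    }

__FP_FRAC_SUB_3/_4 follow the same pattern with borrows, and the last two definitions above show that __FP_FRAC_DEC_3/_4 are simply in-place subtractions (x -= y).
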
/arch/arm/crypto/
aes-neonbs-core.S
  156  .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
  158      veor  \t1, \y2, \y3
  164      vand  \x3, \x3, \y2
  174      y0, y1, y2, y3, t0, t1, t2, t3
  178      veor  \y0, \y0, \y2
  180      mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
  187      mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
  188      veor  \y0, \y0, \y2

/arch/arm64/crypto/
aes-neonbs-core.S
   96  .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
   98      eor  \t1, \y2, \y3
  104      and  \x3, \x3, \y2
  114      y0, y1, y2, y3, t0, t1, t2, t3
  118      eor  \y0, \y0, \y2
  120      mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
  127      mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
  128      eor  \y0, \y0, \y2

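In both NEON bit-sliced AES implementations, each \x and \y register holds one bit position of many AES states processed in parallel, so a finite-field multiplication becomes a fixed network of AND/EOR operations on whole registers; mul_gf4_n_gf4 evaluates two GF(2^4) products that share the same y operands, so common subexpressions (such as the y2^y3 term at the top of the macro) are computed once. As a hedged illustration of the bit-slicing idea only, not the exact basis or operand order of these macros, here is a GF(2^2) multiply in polynomial basis where each uint32_t word carries 32 independent elements:

    #include <stdint.h>

    /* Bit-sliced GF(2^2) multiply, polynomial basis {1, w}, w^2 = w + 1.
     * a0/a1 and b0/b1 are bit-planes: bit i of each word belongs to the
     * i-th of 32 independent elements.  Three ANDs, four XORs per word. */
    static void gf4_mul_bitsliced(uint32_t a0, uint32_t a1,
                                  uint32_t b0, uint32_t b1,
                                  uint32_t *c0, uint32_t *c1)
    {
        uint32_t lo  = a0 & b0;                  /* a0*b0 */
        uint32_t hi  = a1 & b1;                  /* a1*b1 */
        uint32_t mid = (a0 ^ a1) & (b0 ^ b1);    /* all four cross products */

        *c0 = lo ^ hi;       /* constant term: a0b0 + a1b1 (w^2 -> w + 1) */
        *c1 = mid ^ lo;      /* w term: a0b1 + a1b0 + a1b1 */
    }

The larger helpers in these files build the higher levels of the field tower out of primitives like this, sharing intermediate XORs across two products at a time, which is what the shared y2/y3 arguments of mul_gf4_n_gf4 make possible.
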