/arch/x86/crypto/
D | sha256-avx2-asm.S |
    112  y1 = %r14d                                                      define
    161  rorx $11, e, y1          # y1 = e >> 11                         # S1B
    169  xor y1, y0               # y0 = (e>>25) ^ (e>>11)               # S1
    171  vpaddd X0, XTMP0, XTMP0  # XTMP0 = W[-7] + W[-16]
    172  rorx $6, e, y1           # y1 = (e >> 6)                        # S1
    175  xor y1, y0               # y0 = (e>>25) ^ (e>>11) ^ (e>>6)      # S1
    176  rorx $22, a, y1          # y1 = a >> 22                         # S0A
    181  xor T1, y1               # y1 = (a>>22) ^ (a>>13)               # S0
    186  xor T1, y1               # y1 = (a>>22) ^ (a>>13) ^ (a>>2)      # S0
    193  add y1, h                # h = k + w + h + S0                   # --
    [all …]
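The `>>` in these comments denotes rotation: rorx is a rotate-right, and the amounts 25/11/6 and 22/13/2 are the Σ1 and Σ0 rotation constants of SHA-256 (FIPS 180-4). A scalar C sketch of the two values the snippet accumulates in y0/y1; rotr32 is a helper local to this sketch, not taken from the kernel file:

    #include <stdint.h>

    /* Local rotate helper; rorx performs the same operation in one insn. */
    static inline uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static uint32_t big_sigma1(uint32_t e)  /* S1 = (e>>>6)^(e>>>11)^(e>>>25) */
    {
        return rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
    }

    static uint32_t big_sigma0(uint32_t a)  /* S0 = (a>>>2)^(a>>>13)^(a>>>22) */
    {
        return rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
    }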
D | sha512-avx2-asm.S |
    97   y1 = %r14                                                       define
    188  rorx $18, e, y1          # y1 = e >> 18                         # S1B
    194  xor y1, y0               # y0 = (e>>41) ^ (e>>18)               # S1
    196  rorx $14, e, y1          # y1 = (e >> 14)                       # S1
    199  xor y1, y0               # y0 = (e>>41) ^ (e>>18) ^ (e>>14)     # S1
    200  rorx $39, a, y1          # y1 = a >> 39                         # S0A
    204  xor T1, y1               # y1 = (a>>39) ^ (a>>34)               # S0
    208  xor T1, y1               # y1 = (a>>39) ^ (a>>34) ^ (a>>28)     # S0
    214  add y1, h                # h = k + w + h + S0                   # --
    250  rorx $18, e, y1          # y1 = e >> 18                         # S1B
    [all …]
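The SHA-512 variant is the same dataflow at 64 bits, with rotation constants 41/18/14 for Σ1 and 39/34/28 for Σ0 (FIPS 180-4). A matching scalar sketch, again with a local rotate helper:

    #include <stdint.h>

    static inline uint64_t rotr64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    static uint64_t sha512_sigma1(uint64_t e)  /* (e>>>14)^(e>>>18)^(e>>>41) */
    {
        return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
    }

    static uint64_t sha512_sigma0(uint64_t a)  /* (a>>>28)^(a>>>34)^(a>>>39) */
    {
        return rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
    }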
D | sha256-ssse3-asm.S |
    107  y1 = %r14d                                              define
    153  mov a, y1           # y1 = a
    155  ror $(22-13), y1    # y1 = a >> (22-13)
    160  xor a, y1           # y1 = a ^ (a >> (22-13))
    165  ror $(13-2), y1     # y1 = (a >> (13-2)) ^ (a >> (22-2))
    168  xor a, y1           # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
    172  ror $2, y1          # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
    185  add y1, h           # h = h + S1 + CH + k + w + S0
    193  mov a, y1           # y1 = a
    198  ror $(22-13), y1    # y1 = a >> (22-13)
    [all …]
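Rather than rotating three copies of `a` and xoring them, this code keeps one running value and rotates it by the differences between the amounts (22-13, 13-2, then 2), folding `a` back in between. The trick works because rotation distributes over xor. A small self-checking C sketch of that identity; all helper names are local to the sketch:

    #include <assert.h>
    #include <stdint.h>

    static inline uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* Incremental form used by the snippet: valid because
     * rotr(x ^ y, n) == rotr(x, n) ^ rotr(y, n). */
    static uint32_t s0_incremental(uint32_t a)
    {
        uint32_t y1 = a;
        y1 = rotr32(y1, 22 - 13);   /* a>>>9 */
        y1 ^= a;
        y1 = rotr32(y1, 13 - 2);    /* (a>>>20) ^ (a>>>11) */
        y1 ^= a;
        y1 = rotr32(y1, 2);         /* (a>>>22) ^ (a>>>13) ^ (a>>>2) */
        return y1;
    }

    int main(void)
    {
        for (uint32_t a = 0; a < 100000; a++)
            assert(s0_incremental(a) ==
                   (rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22)));
        return 0;
    }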
D | sha256-avx-asm.S |
    115  y1 = %r14d                                              define
    160  mov a, y1             # y1 = a
    162  MY_ROR (22-13), y1    # y1 = a >> (22-13)
    166  xor a, y1             # y1 = a ^ (a >> (22-13))
    171  MY_ROR (13-2), y1     # y1 = (a >> (13-2)) ^ (a >> (22-2))
    174  xor a, y1             # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
    177  MY_ROR 2, y1          # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
    189  add y1, h             # h = h + S1 + CH + k + w + S0
    195  mov a, y1             # y1 = a
    199  MY_ROR (22-13), y1    # y1 = a >> (22-13)
    [all …]
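The AVX file uses the same incremental scheme behind a MY_ROR rotate macro. One identity worth noting if the macro is built from a left-rotating instruction (such as shld with identical source and destination): a rotate-right by n is a rotate-left by 32-n. A minimal sketch, with names local to the sketch:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* A rotate-right macro can equally be expressed as rotl by 32-n. */
    static inline uint32_t my_ror(uint32_t x, unsigned n)
    {
        return rotl32(x, 32 - n);
    }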
D | camellia-aesni-avx-asm_64.S |
    213  #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \    argument
    245  #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     argument
    247      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    249      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    251      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    254  #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     argument
    256      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    258      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    260      two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    483  #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     argument
    [all …]
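two_roundsm16 executes two Camellia Feistel rounds across 16 blocks held in the x/y register sets. Stripped of the vectorization, each invocation is the classic pair `r ^= F(l, k0); l ^= F(r, k1)`. A structural sketch on one block, with the round function left as an assumed helper (the real macro applies it to 16 blocks at once via AES-NI):

    #include <stdint.h>

    /* Assumed helper: Camellia's F (S-box layer plus P diffusion). */
    uint64_t camellia_f(uint64_t half, uint64_t subkey);

    static void two_rounds(uint64_t *l, uint64_t *r,
                           uint64_t k0, uint64_t k1)
    {
        *r ^= camellia_f(*l, k0);   /* round i   */
        *l ^= camellia_f(*r, k1);   /* round i+1 */
    }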
D | poly1305-avx2-x86_64.S |
    43   #define y1 0x2c(%r8)                                            macro
    107  # combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
    108  vmovd y1,ruwy1x
    238  # t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
    261  # t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
    285  # t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
    309  # t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
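Here r1/u1/w1/y1 are limb 1 of four key powers used by the 4-way interleaving, and the precomputed s1=r1*5 etc. come from the field itself: Poly1305 works mod p = 2^130 - 5, so in the radix-2^26 representation any product term that lands above limb 4 wraps around multiplied by 5. A scalar sketch of one product limb under those assumptions (function name is local to the sketch):

    #include <stdint.h>

    /* Limb 1 of h*r in radix 2^26, mod p = 2^130 - 5.  Since
     * 2^130 ≡ 5 (mod p), terms whose limbs sum past 4 wrap times 5,
     * which is why 5*r is kept precomputed. */
    static uint64_t h_times_r_limb1(const uint32_t h[5], const uint32_t r[5])
    {
        uint64_t s4 = (uint64_t)r[4] * 5;
        uint64_t s3 = (uint64_t)r[3] * 5;
        uint64_t s2 = (uint64_t)r[2] * 5;

        return (uint64_t)h[0] * r[1] + (uint64_t)h[1] * r[0] +
               (uint64_t)h[2] * s4  + (uint64_t)h[3] * s3  +
               (uint64_t)h[4] * s2;
    }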
D | camellia-aesni-avx2-asm_64.S |
    247  #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \    argument
    279  #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     argument
    281      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    283      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    285      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    288  #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     argument
    290      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    292      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    294      two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
    517  #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \     argument
    [all …]
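The AVX2 file widens the same construction from xmm to ymm, processing 32 blocks; as the hits show, enc_rounds32 is three chained two_roundsm32 invocations, i.e. six rounds per call. In the terms of the scalar sketch above (camellia_f again an assumed helper):

    #include <stdint.h>

    uint64_t camellia_f(uint64_t half, uint64_t subkey);  /* assumed helper */

    static void two_rounds(uint64_t *l, uint64_t *r, uint64_t k0, uint64_t k1)
    {
        *r ^= camellia_f(*l, k0);
        *l ^= camellia_f(*r, k1);
    }

    /* enc_rounds32 mirrors this: three two-round calls = six rounds. */
    static void enc_six_rounds(uint64_t *l, uint64_t *r, const uint64_t k[6])
    {
        two_rounds(l, r, k[0], k[1]);
        two_rounds(l, r, k[2], k[3]);
        two_rounds(l, r, k[4], k[5]);
    }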
D | twofish-avx-x86_64-asm_64.S |
    120  #define round_head_2(a, b, x1, y1, x2, y2) \        argument
    130      G(RGI3, RGI4, y1, s1, s2, s3, s0); \
    133      vmovq RGS2, y1; \
    134      vpinsrq $1, RGS3, y1, y1; \
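G here is Twofish's key-dependent g-function; scalar implementations commonly fold the key-dependent S-boxes and the MDS matrix into four 256-entry tables at key-setup time, after which g is four lookups and three xors. The vmovq/vpinsrq pair then packs two 64-bit results back into one xmm register. An illustrative table-based sketch, with sbox[][] assumed to hold that precomputed data:

    #include <stdint.h>

    /* Illustrative Twofish g: sbox[][] is assumed to contain the
     * key-dependent S-boxes already composed with the MDS matrix. */
    static uint32_t twofish_g(const uint32_t sbox[4][256], uint32_t x)
    {
        return sbox[0][x & 0xff] ^
               sbox[1][(x >> 8) & 0xff] ^
               sbox[2][(x >> 16) & 0xff] ^
               sbox[3][x >> 24];
    }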
/arch/sparc/include/asm/
D | sfp-machine_32.h |
    78   #define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \            argument
    88       "rI" ((USItype)(y1)), \
    93   #define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \            argument
    103      "rI" ((USItype)(y1)), \
    108  #define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \   argument
    125      "rI" ((USItype)(y1)), \
    133  #define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \   argument
    150      "rI" ((USItype)(y1)), \
    158  #define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)   argument
    160  #define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y…   argument
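These macros give the soft-FP code multiword fraction arithmetic as sparc inline asm, chaining the carry through addcc/addxcc; the "rI" constraints allow each y limb to be a register or a small immediate. A portable C model of the three-word add (names local to the sketch):

    #include <stdint.h>

    /* Portable model of __FP_FRAC_ADD_3: a 96-bit add done as three
     * 32-bit adds, propagating the carry by hand where the sparc
     * version uses the condition-code chain. */
    static void fp_frac_add_3(uint32_t r[3],
                              const uint32_t x[3], const uint32_t y[3])
    {
        uint32_t c0, c1;

        r[0] = x[0] + y[0];
        c0   = r[0] < x[0];                          /* carry out of word 0 */
        r[1] = x[1] + y[1] + c0;
        c1   = (r[1] < x[1]) || (c0 && r[1] == x[1]); /* carry out of word 1 */
        r[2] = x[2] + y[2] + c1;
    }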
/arch/sh/include/uapi/asm/
D | ptrace_32.h |
    70   unsigned long y1;    member
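This y1 is one of the SH DSP registers that the kernel exposes to debuggers through this header. A hedged sketch of reading it from a tracer, assuming the SH-specific PTRACE_GETDSPREGS request defined alongside the struct; the calling convention follows arch/sh and this is not portable to other architectures:

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <asm/ptrace.h>     /* struct pt_dspregs on SH (assumed) */

    /* Sketch only: fetch the traced task's DSP register block. */
    static unsigned long read_dsp_y1(pid_t pid)
    {
        struct pt_dspregs dsp;

        ptrace(PTRACE_GETDSPREGS, pid, (void *)&dsp, NULL);
        return dsp.y1;
    }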
/arch/arm/crypto/
D | aes-neonbs-core.S |
    146  .macro mul_gf4, x0, x1, y0, y1, t0, t1
    147      veor \t0, \y0, \y1
    151      vand \x0, \x0, \y1
    156  .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
    157      veor \t0, \y0, \y1
    165      vand \x0, \x0, \y1
    174      y0, y1, y2, y3, t0, t1, t2, t3
    177      mul_gf4 \x0, \x1, \y0, \y1, \t2, \t3
    179      veor \y1, \y1, \y3
    180      mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
    [all …]
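mul_gf4 multiplies bitsliced GF(2^2) elements, pairs (x1,x0) and (y1,y0) of bit vectors, as a building block of the tower-field AES S-box inversion. Writing the elements as a1*X + a0 reduced mod X^2 + X + 1, sharing the (y0 ^ y1) term gets the product down to three ANDs. A C sketch of that algebra (the macro's exact register choreography differs):

    #include <stdint.h>

    /* Bitsliced GF(2^2) multiply:
     *   c1 = a1b1 ^ a1b0 ^ a0b1,   c0 = a0b0 ^ a1b1
     * computed with three ANDs via t = (a0^a1)&(b0^b1). */
    static void mul_gf4(uint32_t *x0, uint32_t *x1,   /* a0, a1 in/out */
                        uint32_t y0, uint32_t y1)     /* b0, b1 */
    {
        uint32_t t = (*x0 ^ *x1) & (y0 ^ y1);   /* a0b0^a0b1^a1b0^a1b1 */
        uint32_t p = *x0 & y0;                  /* a0b0 */
        uint32_t q = *x1 & y1;                  /* a1b1 */

        *x1 = t ^ p;                            /* c1 */
        *x0 = p ^ q;                            /* c0 */
    }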
/arch/arm64/crypto/
D | aes-neonbs-core.S |
    86   .macro mul_gf4, x0, x1, y0, y1, t0, t1
    87       eor \t0, \y0, \y1
    91       and \x0, \x0, \y1
    96   .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
    97       eor \t0, \y0, \y1
    105      and \x0, \x0, \y1
    114      y0, y1, y2, y3, t0, t1, t2, t3
    117      mul_gf4 \x0, \x1, \y0, \y1, \t2, \t3
    119      eor \y1, \y1, \y3
    120      mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
    [all …]
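The arm64 file carries the same macros with A64 eor/and on vector registers in place of veor/vand. Since the whole construction rests on the three-AND identity, here is a tiny exhaustive check of it against a directly computed GF(2^2) reference (all names local to the sketch):

    #include <assert.h>
    #include <stdint.h>

    /* Direct GF(2^2) product of 2-bit elements, mod X^2 + X + 1. */
    static unsigned gf4_mul_ref(unsigned a, unsigned b)
    {
        unsigned a0 = a & 1, a1 = a >> 1, b0 = b & 1, b1 = b >> 1;
        unsigned c1 = (a1 & b1) ^ (a1 & b0) ^ (a0 & b1);
        unsigned c0 = (a0 & b0) ^ (a1 & b1);
        return (c1 << 1) | c0;
    }

    int main(void)
    {
        for (unsigned a = 0; a < 4; a++)
            for (unsigned b = 0; b < 4; b++) {
                /* three-AND form used by the macros */
                unsigned t = ((a ^ (a >> 1)) & (b ^ (b >> 1))) & 1;
                unsigned p = a & b & 1;
                unsigned q = (a >> 1) & (b >> 1);
                unsigned c = ((t ^ p) << 1) | (p ^ q);
                assert(c == gf4_mul_ref(a, b));
            }
        return 0;
    }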