Searched refs:vpaddd (Results 1 – 13 of 13) sorted by relevance
/arch/x86/crypto/

chacha20-avx2-x86_64.S:
     75  vpaddd %ymm1,%ymm12,%ymm12
     81  vpaddd 0x00(%rsp),%ymm4,%ymm0
     86  vpaddd 0x20(%rsp),%ymm5,%ymm0
     91  vpaddd 0x40(%rsp),%ymm6,%ymm0
     96  vpaddd 0x60(%rsp),%ymm7,%ymm0
    102  vpaddd %ymm12,%ymm8,%ymm8
    108  vpaddd %ymm13,%ymm9,%ymm9
    114  vpaddd %ymm14,%ymm10,%ymm10
    120  vpaddd %ymm15,%ymm11,%ymm11
    127  vpaddd 0x00(%rsp),%ymm4,%ymm0
    [all …]

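The chacha20 hits are the 32-bit "a += b" / "c += d" additions of the
ChaCha20 quarter round, performed across the 32-bit lanes of a ymm
register so that several block states advance at once. A minimal scalar
sketch of the quarter round (reference only, not the kernel code):

    #include <stdint.h>

    static uint32_t rotl32(uint32_t v, int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* One ChaCha20 quarter round; the vpaddd lines above are the
     * "+=" steps, done lane-wise on many states in parallel. */
    static void chacha_quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d)
    {
            *a += *b; *d ^= *a; *d = rotl32(*d, 16);
            *c += *d; *b ^= *c; *b = rotl32(*b, 12);
            *a += *b; *d ^= *a; *d = rotl32(*d, 8);
            *c += *d; *b ^= *c; *b = rotl32(*b, 7);
    }
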
twofish-avx-x86_64-asm_64.S:
    158  vpaddd x, y, x; \
    159  vpaddd x, RK1, RT;\
    162  vpaddd y, x, y; \
    163  vpaddd y, RK2, y; \
    170  vpaddd x, y, x; \
    171  vpaddd x, RK1, RT;\
    174  vpaddd y, x, y; \
    175  vpaddd y, RK2, y; \

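The twofish hits look like Twofish's pseudo-Hadamard transform plus
round-key addition at the end of the round function, vectorized over
several blocks. A scalar sketch with illustrative names; the kernel
macro additionally routes one sum through a temporary (RT):

    #include <stdint.h>

    /* Pseudo-Hadamard transform and key addition: the first vpaddd
     * forms a+b, the next folds in a round key, and the second pair
     * does the same for a+2b. */
    static void twofish_pht(uint32_t *a, uint32_t *b,
                            uint32_t k1, uint32_t k2)
    {
            *a += *b;       /* a' = a + b  */
            *b += *a;       /* b' = a + 2b */
            *a += k1;
            *b += k2;
    }
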
poly1305-avx2-x86_64.S:
    118  vpaddd ruwy1,svxz1,svxz1
    129  vpaddd ruwy2,svxz2,svxz2
    140  vpaddd ruwy3,svxz3,svxz3
    151  vpaddd ruwy4,svxz4,svxz4
    165  vpaddd t1,hc0,hc0
    178  vpaddd t1,hc1,hc1
    191  vpaddd t1,hc2,hc2
    204  vpaddd t1,hc3,hc3
    217  vpaddd t1,hc4,hc4

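A hedged reading of the poly1305 hits, assuming the usual vectorized
Poly1305 layout of five 26-bit limbs per value with several blocks per
register: the t1,hcN adds fold the next message limbs into the
accumulator, and the ruwy/svxz pairs appear to precompute multiples of
r for the modular reduction. Scalar sketch of the accumulator add only:

    #include <stdint.h>

    /* h += c, limb by limb; 26-bit limbs in 32-bit lanes leave enough
     * headroom that no carries are needed before the multiply. */
    static void poly1305_add_block(uint32_t h[5], const uint32_t c[5])
    {
            for (int i = 0; i < 5; i++)
                    h[i] += c[i];
    }
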
aesni-intel_avx-x86_64.S:
    406  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    486  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    490  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    494  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    498  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    502  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    506  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    510  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    514  vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
    645  vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT
    [all …]

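Every aesni hit is the same operation: incrementing the AES-GCM counter
block, where ONE(%rip) is a constant with 1 in a single 32-bit lane. A
sketch with the matching intrinsic, assuming the counter word sits in
the low doubleword (the real code also byte-swaps around the add):

    #include <immintrin.h>

    static __m128i gcm_inc_counter(__m128i ctr)
    {
            const __m128i one = _mm_set_epi32(0, 0, 0, 1);

            /* _mm_add_epi32 is (v)paddd: lane-wise 32-bit add, so
             * only the counter lane changes. */
            return _mm_add_epi32(ctr, one);
    }
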
sha256-avx-asm.S:
    168  vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
    228  vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
    262  vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
    300  vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
    396  vpaddd (TBL), X0, XFER
    400  vpaddd 1*16(TBL), X0, XFER
    404  vpaddd 2*16(TBL), X0, XFER
    408  vpaddd 3*16(TBL), X0, XFER
    418  vpaddd (TBL), X0, XFER
    425  vpaddd 1*16(TBL), X1, XFER

sha256-avx2-asm.S:
    173  vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1
    249  vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
    290  vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
    346  vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
    600  vpaddd 0*32(TBL, SRND), X0, XFER
    604  vpaddd 1*32(TBL, SRND), X0, XFER
    608  vpaddd 2*32(TBL, SRND), X0, XFER
    612  vpaddd 3*32(TBL, SRND), X0, XFER
    622  vpaddd 0*32(TBL, SRND), X0, XFER
    625  vpaddd 1*32(TBL, SRND), X1, XFER

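In both sha256 files the W[-7]/W[-16] comments refer to the SHA-256
message schedule; vpaddd performs its 32-bit additions four words (AVX)
or eight words (AVX2) at a time, and the (TBL) forms add the round
constants K. The scalar recurrence being vectorized:

    #include <stdint.h>

    static uint32_t rotr32(uint32_t v, int n)
    {
            return (v >> n) | (v << (32 - n));
    }

    /* SHA-256 message schedule, rounds 16..63: the three additions
     * in this recurrence are the vpaddd instructions above. */
    static void sha256_schedule(uint32_t W[64])
    {
            for (int t = 16; t < 64; t++) {
                    uint32_t s0 = rotr32(W[t - 15], 7) ^
                                  rotr32(W[t - 15], 18) ^ (W[t - 15] >> 3);
                    uint32_t s1 = rotr32(W[t - 2], 17) ^
                                  rotr32(W[t - 2], 19) ^ (W[t - 2] >> 10);

                    W[t] = W[t - 16] + s0 + W[t - 7] + s1;
            }
    }
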
sha1_avx2_x86_64_asm.S:
    214  vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
    257  vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
    293  vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP

sha1_ssse3_asm.S:
    494  vpaddd (K_BASE), W, W_TMP1
    519  vpaddd K_XMM(K_BASE), W, W_TMP1
    537  vpaddd K_XMM(K_BASE), W, W_TMP1

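Both sha1 files use vpaddd to fold the round constant K into the
message words ahead of time, so each round adds one precomputed W+K
value instead of two separate terms. A scalar sketch of that
precomputation (names illustrative):

    #include <stdint.h>

    static const uint32_t sha1_k[4] = {
            0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6,
    };

    /* Precompute W[t] + K for all 80 rounds; the vpaddd lines above
     * do this four or eight words at a time. */
    static void sha1_precompute_wk(const uint32_t W[80], uint32_t WK[80])
    {
            for (int t = 0; t < 80; t++)
                    WK[t] = W[t] + sha1_k[t / 20];
    }
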
cast6-avx-x86_64-asm_64.S:
    151  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)

cast5-avx-x86_64-asm_64.S:
    151  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)

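In cast5/cast6 the F_2 macro expands the round function twice, and its
operator arguments select the round type; passing vpaddd makes the key
mixing an addition, i.e. a type-1 round in RFC 2144 terms, with
xorl/subl/addl as the S-box combining steps. A scalar sketch of a
type-1 F function (S-boxes left external; not the kernel code):

    #include <stdint.h>

    extern const uint32_t S1[256], S2[256], S3[256], S4[256];

    static uint32_t cast_f1(uint32_t d, uint32_t km, uint8_t kr)
    {
            uint32_t i = km + d;    /* the vpaddd step: (Km + D) */

            kr &= 31;
            i = (i << kr) | (i >> ((32 - kr) & 31));  /* <<< Kr */
            return ((S1[i >> 24] ^ S2[(i >> 16) & 0xff]) -
                    S3[(i >> 8) & 0xff]) + S4[i & 0xff];
    }
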
/arch/x86/crypto/sha256-mb/

sha256_x8_avx2.S:
    227  vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
    230  vpaddd a2, h, h # h = h + ch
    232  vpaddd \_T1,h, h # h = h + ch + W + K
    238  vpaddd a0, h, h
    239  vpaddd h, d, d
    245  vpaddd a1, h, h # h = h + ch + W + K + maj
    246  vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
    266  vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
    267  vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
    268  vpaddd a1, \_T1, \_T1
    [all …]

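The sha256_x8 hits are the round additions themselves, restructured so
that h accumulates one term at a time across eight interleaved message
blocks (the "T1 = W + K" value comes in precomputed). For reference, a
standard scalar SHA-256 round:

    #include <stdint.h>

    static uint32_t rotr32(uint32_t v, int n)
    {
            return (v >> n) | (v << (32 - n));
    }

    /* One SHA-256 round; wk is the precomputed W[t] + K[t]. The
     * chain of vpaddd's above computes t1 piecewise and adds it
     * into d, matching the comments on the hits. */
    static void sha256_round(uint32_t s[8], uint32_t wk)
    {
            uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
            uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
            uint32_t ch = (e & f) ^ (~e & g);
            uint32_t maj = (a & b) ^ (a & c) ^ (b & c);
            uint32_t t1 = h + (rotr32(e, 6) ^ rotr32(e, 11) ^
                               rotr32(e, 25)) + ch + wk;
            uint32_t t2 = (rotr32(a, 2) ^ rotr32(a, 13) ^
                           rotr32(a, 22)) + maj;

            s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
            s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
    }
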
/arch/x86/crypto/sha1-mb/

sha1_x8_avx2.S:
    167  vpaddd \immCNT, \regE, \regE
    168  vpaddd \memW*32(%rsp), \regE, \regE
    170  vpaddd \regT, \regE, \regE
    173  vpaddd \regF, \regE, \regE
    177  vpaddd \immCNT, \regE, \regE
    193  vpaddd \regF, \regE, \regE
    195  vpaddd \regT, \regE, \regE
    198  vpaddd \regF, \regE, \regE
    415  vpaddd AA,A,A
    416  vpaddd BB,B,B
    [all …]

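Here the early hits build the SHA-1 round sum e + W[t] + K + rot(a,5) +
f(b,c,d) term by term across eight lanes, and the AA,A / BB,B adds are
the final feed-forward of the saved state into the digest. A scalar
reference round (not the kernel code):

    #include <stdint.h>

    static uint32_t rotl32(uint32_t v, int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* One SHA-1 round; f is the round's boolean function of b, c, d
     * and k its constant. Every "+" here is one vpaddd above. */
    static void sha1_round(uint32_t s[5], uint32_t f, uint32_t k,
                           uint32_t w)
    {
            uint32_t t = rotl32(s[0], 5) + f + s[4] + k + w;

            s[4] = s[3]; s[3] = s[2]; s[2] = rotl32(s[1], 30);
            s[1] = s[0]; s[0] = t;
    }
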
/arch/x86/lib/

x86-opcode-map.txt:
    609  fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)

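The opcode-map row pairs the legacy MMX paddd with the 66-prefixed
SSE/VEX form; vpaddd Vx,Hx,Wx is the VEX three-operand version.
Semantically it is a lane-wise 32-bit add that wraps modulo 2^32 with
no carry between lanes, which is what the standard intrinsic exposes.
A minimal demonstration (build with -mavx2):

    #include <stdio.h>
    #include <immintrin.h>

    int main(void)
    {
            __m256i a = _mm256_set1_epi32(0x7fffffff);
            __m256i b = _mm256_set1_epi32(1);
            /* _mm256_add_epi32 compiles to vpaddd: eight independent
             * 32-bit adds, each wrapping, no inter-lane carries. */
            __m256i c = _mm256_add_epi32(a, b);
            unsigned out[8];

            _mm256_storeu_si256((__m256i *)out, c);
            printf("%08x\n", out[0]);   /* prints 80000000 */
            return 0;
    }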