Lines matching refs: T7
605 .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
614 vpxor \T7, \T7, \T7
618 vmovdqu (%r10), \T7
619 vpshufb SHUF_MASK(%rip), \T7, \T7
620 vpxor \T7, \T8, \T8
627 vmovdqu \T8, \T7
631 vpxor \T7, \T7, \T7
643 vpsrldq $8, \T7, \T7
644 vpxor \T1, \T7, \T7
654 vpsrldq $4, \T7, \T7
655 vpxor \T1, \T7, \T7
663 vpshufb \T1, \T7, \T7
665 vpshufb SHUF_MASK(%rip), \T7, \T7
666 vpxor \T8, \T7, \T7
667 \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
670 vmovdqu \T7, AadHash(arg2)
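
In the CALC_AAD_HASH lines above, T7 is the running hash: each 16-byte block of AAD is byte-reflected (vpshufb SHUF_MASK), XORed into the running value, and multiplied by the hash key through the GHASH_MUL macro argument; the vpsrldq/vpshufb lines in the middle handle a trailing partial AAD block, and the final vmovdqu stores the result back to AadHash. As a functional reference only, here is a minimal C sketch of the textbook GF(2^128) multiply from NIST SP 800-38D that GHASH_MUL implements with vpclmulqdq plus a reduction; the gf128 type and gf128_mul name are illustrative, not taken from the source.

/*
 * Hedged sketch: textbook GF(2^128) multiply per NIST SP 800-38D, i.e. the
 * operation CALC_AAD_HASH performs per 16-byte AAD block:  Y = (Y ^ A_i) * H.
 * Names (gf128, gf128_mul) are illustrative; the assembly computes the same
 * product with vpclmulqdq and a polynomial reduction.
 */
#include <stdint.h>

typedef struct { uint64_t hi, lo; } gf128;   /* hi = bits 127..64, lo = bits 63..0 */

static gf128 gf128_mul(gf128 x, gf128 y)
{
    gf128 z = { 0, 0 };
    gf128 v = y;

    /* Walk the 128 bits of x, most-significant (leftmost) bit first. */
    for (int i = 0; i < 128; i++) {
        uint64_t bit = (i < 64) ? (x.hi >> (63 - i)) & 1
                                : (x.lo >> (127 - i)) & 1;
        if (bit) {
            z.hi ^= v.hi;
            z.lo ^= v.lo;
        }
        /* Multiply v by x: shift the bit string right by one, folding in
         * R = 0xe1 << 120 when a bit falls off the low end. */
        uint64_t lsb = v.lo & 1;
        v.lo = (v.lo >> 1) | (v.hi << 63);
        v.hi >>= 1;
        if (lsb)
            v.hi ^= 0xe100000000000000ULL;
    }
    return z;
}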
1236 …_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
1322 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
1345 vpxor \T3, \T7, \T7
1370 vpxor \T3, \T7, \T7
1393 vpxor \T3, \T7, \T7
1417 vpxor \T3, \T7, \T7
1440 vpxor \T3, \T7, \T7
1464 vpxor \T3, \T7, \T7
1489 vpxor \T3, \T7, \T7
1498 vpxor \T7, \T6, \T6
1541 vpxor \T3, \T7, \T7
1542 vpxor \T4, \T6, \T6 # accumulate the results in T6:T7
1549 vpslld $31, \T7, \T2 # packed left shifting << 31
1550 vpslld $30, \T7, \T3 # packed left shifting << 30
1551 vpslld $25, \T7, \T4 # packed left shifting << 25
1559 vpxor \T2, \T7, \T7 # first phase of the reduction complete
1574 vpsrld $1, \T7, \T2 # packed right shifting >> 1
1575 vpsrld $2, \T7, \T3 # packed right shifting >> 2
1576 vpsrld $7, \T7, \T4 # packed right shifting >> 7
1581 vpxor \T2, \T7, \T7
1582 vpxor \T7, \T6, \T6 # the result is in T6
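
In the parallel encrypt/GHASH macro, T7 accumulates the low 128 bits and T6 the high 128 bits of the 256-bit carry-less product of the eight data blocks with the corresponding hash-key powers (see the "accumulate the results in T6:T7" comment); the repeated vpxor \T3, \T7, \T7 lines fold in the low*low (a0*b0-style) partial product of each block, and the shift sequence at the end performs the two-phase reduction. The sketch below is a hedged C model of the underlying arithmetic: clmul64 mimics what a single vpclmulqdq does, and clmul128 combines four such multiplies, one per immediate ($0x00, $0x11, $0x01, $0x10, following the a0/a1/b0/b1 comment convention in the listing), into a 256-bit product split across a high and a low half the way T6:T7 are used. All names are illustrative.

/*
 * Hedged sketch of the carry-less arithmetic behind the vpclmulqdq lines:
 * clmul64() models one 64x64 -> 128 bit carry-less multiply; clmul128()
 * combines four of them (lo*lo, hi*hi, and the two cross terms) into a
 * 256-bit product split into a high and a low 128-bit half, the way the
 * assembly accumulates them in T6 (high) and T7 (low).
 */
#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128;
typedef struct { u128 hi, lo; } u256;

static u128 clmul64(uint64_t a, uint64_t b)
{
    u128 r = { 0, 0 };
    for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
            r.lo ^= a << i;
            if (i)
                r.hi ^= a >> (64 - i);
        }
    }
    return r;
}

static u256 clmul128(u128 a, u128 b)
{
    u128 lo  = clmul64(a.lo, b.lo);   /* like the $0x00 immediate: a0*b0 */
    u128 hi  = clmul64(a.hi, b.hi);   /* like the $0x11 immediate: a1*b1 */
    u128 m0  = clmul64(a.hi, b.lo);   /* $0x01: a1*b0 */
    u128 m1  = clmul64(a.lo, b.hi);   /* $0x10: a0*b1 */

    /* XOR the two cross terms, then split them across the two halves
     * (the assembly does this split with vpslldq/vpsrldq). */
    u128 mid = { m0.hi ^ m1.hi, m0.lo ^ m1.lo };

    u256 r;
    r.lo.lo = lo.lo;
    r.lo.hi = lo.hi ^ mid.lo;   /* low half, accumulated in T7 */
    r.hi.lo = hi.lo ^ mid.hi;   /* high half, accumulated in T6 */
    r.hi.hi = hi.hi;
    return r;
}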
1603 .macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
1612 vpclmulqdq $0x00, \T5, \XMM1, \T7
1626 vpxor \T4, \T7, \T7
1641 vpxor \T4, \T7, \T7
1656 vpxor \T4, \T7, \T7
1671 vpxor \T4, \T7, \T7
1686 vpxor \T4, \T7, \T7
1701 vpxor \T4, \T7, \T7
1716 vpxor \T4, \T7, \T7
1723 vpxor \T7, \XMM1, \T2
1731 vpxor \T4, \T7, \T7
1732 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of
1737 vpslld $31, \T7, \T2 # packed left shifting << 31
1738 vpslld $30, \T7, \T3 # packed left shifting << 30
1739 vpslld $25, \T7, \T4 # packed left shifting << 25
1747 vpxor \T2, \T7, \T7 # first phase of the reduction complete
1752 vpsrld $1, \T7, \T2 # packed right shifting >> 1
1753 vpsrld $2, \T7, \T3 # packed right shifting >> 2
1754 vpsrld $7, \T7, \T4 # packed right shifting >> 7
1759 vpxor \T2, \T7, \T7
1760 vpxor \T7, \T6, \T6 # the result is in T6
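
GHASH_LAST_8_AVX folds the eight per-block products into one value: mathematically Y = X1*H^8 ^ X2*H^7 ^ ... ^ X8*H, which equals eight sequential Horner steps Y = (Y ^ Xi)*H but lets the eight multiplies run independently before a single shared reduction (the assembly keeps the needed powers of the hash key precomputed). Below is a hedged sketch of that aggregation; it reuses the illustrative gf128/gf128_mul types from the sketch after the CALC_AAD_HASH listing, and hkey_pow[] is an assumed, illustrative array of precomputed key powers.

/*
 * Hedged sketch of the aggregation GHASH_LAST_8_AVX performs: multiply block i
 * by H^(8-i), XOR all eight products together, and reduce once.  This equals
 * eight Horner steps Y = (Y ^ Xi) * H.  Reuses gf128/gf128_mul from the
 * earlier sketch; hkey_pow[k] = H^(k+1) is assumed precomputed.
 */
static gf128 ghash_8_blocks(const gf128 hkey_pow[8], gf128 y, const gf128 x[8])
{
    gf128 acc = { 0, 0 };

    for (int i = 0; i < 8; i++) {
        gf128 b = x[i];
        if (i == 0) {            /* the running hash folds into the first block */
            b.hi ^= y.hi;
            b.lo ^= y.lo;
        }
        gf128 t = gf128_mul(b, hkey_pow[7 - i]);   /* block i paired with H^(8-i) */
        acc.hi ^= t.hi;
        acc.lo ^= t.lo;
    }
    return acc;
}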
2191 …PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
2277 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
2298 vpxor \T3, \T7, \T7
2324 vpxor \T3, \T7, \T7
2348 vpxor \T3, \T7, \T7
2373 vpxor \T3, \T7, \T7
2397 vpxor \T3, \T7, \T7
2421 vpxor \T3, \T7, \T7
2446 vpxor \T3, \T7, \T7
2498 vpxor \T3, \T7, \T7
2499 vpxor \T6, \T1, \T1 # accumulate the results in T1:T7
2507 vpclmulqdq $0x01, \T7, \T3, \T2
2510 vpxor \T2, \T7, \T7 # first phase of the reduction complete
2525 vpclmulqdq $0x00, \T7, \T3, \T2
2528 vpclmulqdq $0x10, \T7, \T3, \T4
2553 .macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
2565 vpclmulqdq $0x00, \T5, \XMM1, \T7
2581 vpxor \T4, \T7, \T7
2599 vpxor \T4, \T7, \T7
2617 vpxor \T4, \T7, \T7
2635 vpxor \T4, \T7, \T7
2653 vpxor \T4, \T7, \T7
2671 vpxor \T4, \T7, \T7
2689 vpxor \T4, \T7, \T7
2695 vpxor \T7, \XMM1, \T2
2703 vpxor \T4, \T7, \T7
2704 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of the
2711 vpclmulqdq $0x01, \T7, \T3, \T2
2714 vpxor \T2, \T7, \T7 # first phase of the reduction complete
2719 vpclmulqdq $0x00, \T7, \T3, \T2
2722 vpclmulqdq $0x10, \T7, \T3, \T4
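
Both the AVX and AVX2 tails reduce the 256-bit product with the same math; only the instruction mix differs. Writing the product as hi:lo (the listing keeps the low half in T7 throughout, with the high half in T6 or T1 depending on the macro), the reduction rests on the identity

    hi * x^128 = hi * (x^7 + x^2 + x + 1)   (mod x^128 + x^7 + x^2 + x + 1)

since x^128 is congruent to x^7 + x^2 + x + 1 modulo the GCM polynomial. In the bit order these routines operate in, multiplying by x^7 + x^2 + x + 1 turns into the word-level shift pairs seen in the AVX lines (left shifts by 31, 30, 25 in the first phase, right shifts by 1, 2, 7 in the second), while the AVX2 lines obtain the same effect from two vpclmulqdq operations against a precomputed polynomial constant held in T3.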