Lines matching refs: t4
51 #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
56 vmovdqa .Linv_shift_row, t4; \
62 vpshufb t4, x0, x0; \
63 vpshufb t4, x7, x7; \
64 vpshufb t4, x1, x1; \
65 vpshufb t4, x4, x4; \
66 vpshufb t4, x2, x2; \
67 vpshufb t4, x5, x5; \
68 vpshufb t4, x3, x3; \
69 vpshufb t4, x6, x6; \
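[Note] Lines 56-69 load the .Linv_shift_row byte-permutation constant into t4 and apply it to all eight state registers. In AT&T/AVX operand order, vpshufb t4, x0, x0 permutes the bytes of x0 using t4 as the selector. A minimal C sketch of the vpshufb mechanics follows; the real .Linv_shift_row contents are not shown in this listing, so the byte-reversal selector below is only a stand-in:

#include <stdio.h>
#include <immintrin.h>  /* compile with -mssse3 */

int main(void)
{
    /* Stand-in selector: reverse the 16 bytes (NOT the real .Linv_shift_row). */
    const __m128i mask = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                       7, 6, 5, 4, 3, 2, 1, 0);
    const __m128i x = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                    8, 9, 10, 11, 12, 13, 14, 15);
    __m128i y = _mm_shuffle_epi8(x, mask);   /* vpshufb mask, x, y */
    unsigned char out[16];
    _mm_storeu_si128((__m128i *)out, y);
    printf("%u %u\n", out[0], out[15]);      /* prints "15 0" */
    return 0;
}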
82 vpxor t4, t4, t4; \
89 vaesenclast t4, x0, x0; \
90 vaesenclast t4, x7, x7; \
91 vaesenclast t4, x1, x1; \
92 vaesenclast t4, x4, x4; \
93 vaesenclast t4, x2, x2; \
94 vaesenclast t4, x5, x5; \
95 vaesenclast t4, x3, x3; \
96 vaesenclast t4, x6, x6; \
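[Note] Line 82 zeroes t4 (vpxor t4, t4, t4), and lines 89-96 then pass it as the round key to vaesenclast. AESENCLAST computes SubBytes(ShiftRows(state)) XOR roundkey, so with an all-zero key it applies the raw AES S-box to every byte, and the inverse-ShiftRows shuffle applied above cancels the ShiftRows step. A self-contained sketch of the zero-key trick:

#include <stdio.h>
#include <immintrin.h>  /* compile with -maes */

int main(void)
{
    __m128i zero = _mm_setzero_si128();            /* vpxor t4, t4, t4 */
    /* AESENCLAST(state=0, key=0) = SubBytes(ShiftRows(0))
     * = 16 copies of S-box(0x00) = 0x63 in every byte. */
    __m128i y = _mm_aesenclast_si128(zero, zero);  /* vaesenclast t4, x0, x0 */
    unsigned char out[16];
    _mm_storeu_si128((__m128i *)out, y);
    printf("0x%02x\n", out[0]);                    /* prints "0x63" */
    return 0;
}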
107 vmovdqa .Lpost_tf_lo_s2, t4; \
116 filter_8bit(x1, t4, t5, t7, t2); \
117 filter_8bit(x4, t4, t5, t7, t2); \
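[Note] filter_8bit is defined elsewhere in the file; this listing only shows its call sites, with t4 holding the .Lpost_tf_lo_s2 table (line 107) and t5 presumably holding the matching high-nibble table, loaded on a line this search did not match. A common way such 8-bit filters are built from vpshufb is a nibble-split table lookup; the macro's actual body is not part of this listing, so the sketch below is an assumption about its structure:

#include <immintrin.h>  /* compile with -mssse3 */

/* Sketch: per-byte y = lo_tbl[x & 0x0f] ^ hi_tbl[x >> 4], the usual
 * nibble-split PSHUFB lookup. Names and structure are illustrative,
 * not the kernel macro's literal body. */
static inline __m128i filter_8bit_sketch(__m128i x, __m128i lo_tbl, __m128i hi_tbl)
{
    const __m128i mask0f = _mm_set1_epi8(0x0f);
    __m128i lo = _mm_and_si128(x, mask0f);                    /* low nibbles  */
    __m128i hi = _mm_and_si128(_mm_srli_epi16(x, 4), mask0f); /* high nibbles */
    lo = _mm_shuffle_epi8(lo_tbl, lo);  /* 16-entry table lookup via vpshufb */
    hi = _mm_shuffle_epi8(hi_tbl, hi);
    return _mm_xor_si128(lo, hi);
}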
123 vpsrldq $4, t0, t4; \
128 vpshufb t6, t4, t4; \
184 vpxor t4, x3, x3; \
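[Note] The last three matches are scattered (lines 123, 128, 184, with unmatched lines in between), so they do not form one contiguous sequence. Taken individually: vpsrldq $4, t0, t4 shifts the whole 128-bit t0 right by 4 bytes into t4; vpshufb t6, t4, t4 permutes t4's bytes by the t6 selector; vpxor t4, x3, x3 folds the result into x3. An illustrative composition under that caveat:

#include <immintrin.h>  /* compile with -mssse3 */

/* Illustrative only: the listed instructions are not adjacent in the
 * source, so running them back-to-back is an assumption. */
static inline __m128i t4_tail_sketch(__m128i x3, __m128i t0, __m128i t6_mask)
{
    __m128i t4 = _mm_srli_si128(t0, 4);  /* vpsrldq $4, t0, t4 */
    t4 = _mm_shuffle_epi8(t4, t6_mask);  /* vpshufb t6, t4, t4 */
    return _mm_xor_si128(x3, t4);        /* vpxor t4, x3, x3   */
}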