
Lines Matching refs:TMP2

164 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
166 pshufd $78, \GH, \TMP2
168 pxor \GH, \TMP2 # TMP2 = a1+a0
172 PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
173 pxor \GH, \TMP2
174 pxor \TMP1, \TMP2 # TMP2 = (a1*b0)+(a0*b1)
175 movdqa \TMP2, \TMP3
177 psrldq $8, \TMP2 # right shift TMP2 2 DWs
179 pxor \TMP2, \TMP1 # TMP1:GH holds the result of GH*HK
183 movdqa \GH, \TMP2
185 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
188 pslld $31, \TMP2 # packed left shift <<31
191 pxor \TMP3, \TMP2 # xor the shifted versions
192 pxor \TMP4, \TMP2
193 movdqa \TMP2, \TMP5
195 pslldq $12, \TMP2 # left shift TMP2 3 DWs
196 pxor \TMP2, \GH
200 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
205 psrld $1,\TMP2 # packed right shift >>1
208 pxor \TMP3,\TMP2 # xor the shifted versions
209 pxor \TMP4,\TMP2
210 pxor \TMP5, \TMP2
211 pxor \TMP2, \GH
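
The GHASH_MUL lines above are the per-block multiply in GF(2^128): a Karatsuba split into 64-bit halves (three PCLMULQDQ instead of four), a fold of the middle term into the 256-bit product, and a two-phase shift/xor reduction. Below is a minimal C intrinsics sketch of the same sequence, not taken from the kernel source: the function name is illustrative, GH and HK are assumed to already be in the byte-reflected form the assembly uses, and the closing xor of the high half mirrors the macro's final fold of TMP1 into GH (which does not mention TMP2 and so is absent from this filtered listing). Build with -msse2 -mpclmul.

    /* Sketch of GHASH_MUL: gh <- gh * hk in GF(2^128), same scratch roles
     * as TMP1..TMP5 above.  Illustrative only. */
    #include <immintrin.h>

    static __m128i ghash_mul(__m128i gh, __m128i hk)
    {
        __m128i t1, t2, t3, t5;

        /* Karatsuba: a1*b1, a0*b0 and (a1+a0)*(b1+b0) */
        t1 = _mm_clmulepi64_si128(gh, hk, 0x11);            /* a1*b1                      */
        t2 = _mm_xor_si128(_mm_shuffle_epi32(gh, 78), gh);  /* both halves hold a1+a0     */
        t3 = _mm_xor_si128(_mm_shuffle_epi32(hk, 78), hk);  /* both halves hold b1+b0     */
        t2 = _mm_clmulepi64_si128(t2, t3, 0x00);            /* (a1+a0)*(b1+b0)            */
        gh = _mm_clmulepi64_si128(gh, hk, 0x00);            /* a0*b0                      */
        t2 = _mm_xor_si128(t2, gh);
        t2 = _mm_xor_si128(t2, t1);                         /* middle term a1*b0 + a0*b1  */

        /* fold the middle term: t1:gh now holds the 256-bit product */
        gh = _mm_xor_si128(gh, _mm_slli_si128(t2, 8));      /* pslldq $8                  */
        t1 = _mm_xor_si128(t1, _mm_srli_si128(t2, 8));      /* psrldq $8                  */

        /* first phase of the reduction: packed left shifts <<31, <<30, <<25 */
        t2 = _mm_xor_si128(_mm_slli_epi32(gh, 31),
             _mm_xor_si128(_mm_slli_epi32(gh, 30), _mm_slli_epi32(gh, 25)));
        t5 = _mm_srli_si128(t2, 4);                         /* carried into phase 2       */
        gh = _mm_xor_si128(gh, _mm_slli_si128(t2, 12));     /* pslldq $12                 */

        /* second phase of the reduction: packed right shifts >>1, >>2, >>7 */
        t2 = _mm_xor_si128(_mm_srli_epi32(gh, 1),
             _mm_xor_si128(_mm_srli_epi32(gh, 2), _mm_srli_epi32(gh, 7)));
        t2 = _mm_xor_si128(t2, t5);
        gh = _mm_xor_si128(gh, t2);
        return _mm_xor_si128(gh, t1);                       /* fold in the high half      */
    }
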
227 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
267 MOVADQ (%arg1),\TMP2
272 pxor \TMP2, %xmm\index
304 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
309 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
311 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
313 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
316 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
318 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
321 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
357 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
371 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
384 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
397 MOVADQ (%r10),\TMP2
399 AESENC \TMP2, %xmm\index
406 MOVADQ (%r10), \TMP2
407 AESENCLAST \TMP2, \XMM1
408 AESENCLAST \TMP2, \XMM2
409 AESENCLAST \TMP2, \XMM3
410 AESENCLAST \TMP2, \XMM4
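
The INITIAL_BLOCKS_DEC matches show TMP2 in a second role: it is only a staging register for the current AES round key, xored in for key whitening (lines 267/272) and then fed to AESENC/AESENCLAST for the remaining rounds. A hedged C intrinsics sketch of that round-key pattern follows; it assumes an already expanded schedule rk[0..nr] (nr = 10, 12 or 14), as %arg1/%r10 provide in the assembly, and the function name is illustrative. Build with -msse2 -maes.

    /* Sketch of the round-key pattern: whitening xor, nr-1 AESENC rounds,
     * one AESENCLAST.  Assumes rk[] is a pre-expanded key schedule. */
    #include <immintrin.h>

    static __m128i aes_encrypt_block(const __m128i *rk, int nr, __m128i block)
    {
        int i;

        block = _mm_xor_si128(block, rk[0]);           /* pxor round key 0       */
        for (i = 1; i < nr; i++)
            block = _mm_aesenc_si128(block, rk[i]);    /* AESENC middle rounds   */
        return _mm_aesenclast_si128(block, rk[nr]);    /* AESENCLAST final round */
    }
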
452 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
489 MOVADQ 0(%arg1),\TMP2
494 pxor \TMP2, %xmm\index
525 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
530 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
532 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
534 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
537 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
539 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
542 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
578 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
592 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
605 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
618 MOVADQ (%r10),\TMP2
620 AESENC \TMP2, %xmm\index
627 MOVADQ (%r10), \TMP2
628 AESENCLAST \TMP2, \XMM1
629 AESENCLAST \TMP2, \XMM2
630 AESENCLAST \TMP2, \XMM3
631 AESENCLAST \TMP2, \XMM4
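
INITIAL_BLOCKS_ENC reuses the same two TMP2 roles (Karatsuba scratch inside GHASH_MUL, round-key staging for AESENC/AESENCLAST). The chain of GHASH_MUL calls at lines 525-542 implements the GHASH chaining over the blocks handled so far; as a worked form (not quoted from the source), with X_0 = 0, C_i the i-th 128-bit ciphertext block and H the hash key:

    X_i = (X_{i-1} \oplus C_i) \cdot H  \pmod{x^{128} + x^7 + x^2 + x + 1}
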
663 .macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
710 pshufd $78, \XMM6, \TMP2
711 pxor \XMM6, \TMP2
726 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
735 pxor \TMP2, \TMP6
737 pshufd $78, \XMM7, \TMP2
738 pxor \XMM7, \TMP2
756 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
765 pxor \TMP2, \TMP6
771 pshufd $78, \XMM8, \TMP2
772 pxor \XMM8, \TMP2
803 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
823 pxor \TMP6, \TMP2
824 pxor \TMP1, \TMP2
825 pxor \XMM5, \TMP2
826 movdqa \TMP2, \TMP3
828 psrldq $8, \TMP2 # right shift TMP2 2 DWs
830 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
834 movdqa \XMM5, \TMP2
837 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
838 pslld $31, \TMP2 # packed left shift << 31
841 pxor \TMP3, \TMP2 # xor the shifted versions
842 pxor \TMP4, \TMP2
843 movdqa \TMP2, \TMP5
845 pslldq $12, \TMP2 # left shift TMP2 3 DWs
846 pxor \TMP2, \XMM5
850 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
853 psrld $1, \TMP2 # packed right shift >>1
856 pxor \TMP3,\TMP2 # xor the shifted versions
857 pxor \TMP4,\TMP2
858 pxor \TMP5, \TMP2
859 pxor \TMP2, \XMM5
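
In GHASH_4_ENCRYPT_4_PARALLEL_ENC, TMP2 again carries each block's Karatsuba middle term, and the four partial products are only accumulated with plain pxor before the single shift/xor reduction at the end of the macro. That works because the recurrence above unrolls across four blocks into independent multiplies by different powers of H:

    X_{i+4} = (X_i \oplus C_{i+1}) \cdot H^4 \oplus C_{i+2} \cdot H^3 \oplus C_{i+3} \cdot H^2 \oplus C_{i+4} \cdot H

so the four products can be summed first and reduced once.
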
871 .macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
918 pshufd $78, \XMM6, \TMP2
919 pxor \XMM6, \TMP2
934 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
943 pxor \TMP2, \TMP6
945 pshufd $78, \XMM7, \TMP2
946 pxor \XMM7, \TMP2
964 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
973 pxor \TMP2, \TMP6
979 pshufd $78, \XMM8, \TMP2
980 pxor \XMM8, \TMP2
1011 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1035 pxor \TMP6, \TMP2
1036 pxor \TMP1, \TMP2
1037 pxor \XMM5, \TMP2
1038 movdqa \TMP2, \TMP3
1040 psrldq $8, \TMP2 # right shift TMP2 2 DWs
1042 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1046 movdqa \XMM5, \TMP2
1049 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1050 pslld $31, \TMP2 # packed left shift << 31
1053 pxor \TMP3, \TMP2 # xor the shifted versions
1054 pxor \TMP4, \TMP2
1055 movdqa \TMP2, \TMP5
1057 pslldq $12, \TMP2 # left shift TMP2 3 DWs
1058 pxor \TMP2, \XMM5
1062 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1065 psrld $1, \TMP2 # packed right shift >>1
1068 pxor \TMP3,\TMP2 # xor the shifted versions
1069 pxor \TMP4,\TMP2
1070 pxor \TMP5, \TMP2
1071 pxor \TMP2, \XMM5
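
The DEC variant repeats the same structure. The shift/xor tail that closes both parallel macros (and GHASH_MUL itself) is the reduction of the 256-bit carry-less product modulo the GHASH polynomial

    g(x) = x^{128} + x^7 + x^2 + x + 1

Because the code keeps the data bit-reflected, the second-phase packed shifts by 1, 2 and 7 come from the x, x^2 and x^7 terms of g(x), and the first-phase shifts by 31, 30 and 25 are their 32-bit complements.
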
1078 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1084 pshufd $78, \XMM1, \TMP2
1085 pxor \XMM1, \TMP2
1090 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1092 movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
1097 pshufd $78, \XMM2, \TMP2
1098 pxor \XMM2, \TMP2
1103 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1106 pxor \TMP2, \XMM1
1112 pshufd $78, \XMM3, \TMP2
1113 pxor \XMM3, \TMP2
1118 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1121 pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1
1125 pshufd $78, \XMM4, \TMP2
1126 pxor \XMM4, \TMP2
1131 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1134 pxor \XMM1, \TMP2
1135 pxor \TMP6, \TMP2
1136 pxor \XMMDst, \TMP2
1138 movdqa \TMP2, \TMP4
1140 psrldq $8, \TMP2 # right shift TMP2 2 DWs
1142 pxor \TMP2, \TMP6
1145 movdqa \XMMDst, \TMP2
1148 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1149 pslld $31, \TMP2 # packed left shifting << 31
1152 pxor \TMP3, \TMP2 # xor the shifted versions
1153 pxor \TMP4, \TMP2
1154 movdqa \TMP2, \TMP7
1156 pslldq $12, \TMP2 # left shift TMP2 3 DWs
1157 pxor \TMP2, \XMMDst
1160 movdqa \XMMDst, \TMP2
1164 psrld $1, \TMP2 # packed right shift >> 1
1167 pxor \TMP3, \TMP2 # xor the shifted versions
1168 pxor \TMP4, \TMP2
1169 pxor \TMP7, \TMP2
1170 pxor \TMP2, \XMMDst
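
GHASH_LAST_4 is the closing fold: each lane accumulator XMM1-XMM4 gets one more Karatsuba multiply against a stored power of the hash key (the key loads do not reference TMP2 and so are not shown above), the middle terms collect through TMP2, and a single reduction leaves the digest in XMMDst. In effect, assuming the lanes are paired with the powers H^4 down to H as the rest of the file precomputes them:

    XMMDst = X_1 \cdot H^4 \oplus X_2 \cdot H^3 \oplus X_3 \cdot H^2 \oplus X_4 \cdot H  \pmod{g(x)}
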