#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# March, June 2010
#
# The module implements the "4-bit" GCM GHASH function and the underlying
# single multiplication operation in GF(2^128). "4-bit" means that
# it uses a 256-byte per-key table [+128 bytes shared table]. The GHASH
# function features a so-called "528B" variant utilizing an additional
# 256+16 bytes of per-key storage [+512 bytes shared table].
# Performance results are for this streamed GHASH subroutine and are
# expressed in cycles per processed byte, less is better:
#
#		gcc 3.4.x(*)	assembler
#
# P4		28.6		14.0		+100%
# Opteron	19.3		7.7		+150%
# Core2		17.8		8.1(**)		+120%
# Atom		31.6		16.8		+88%
# VIA Nano	21.8		10.1		+115%
#
# (*)	the comparison is not completely fair, because the C results are
#	for the vanilla "256B" implementation, while the assembler results
#	are for "528B";-)
# (**)	it's a mystery [to me] why the Core2 result is not the same as for
#	Opteron;

# May 2010
#
# Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
# See ghash-x86.pl for background information and details about coding
# techniques.
#
# Special thanks to David Woodhouse <dwmw2@infradead.org> for
# providing access to a Westmere-based system on behalf of Intel
# Open Source Technology Centre.

# December 2012
#
# Overhaul: aggregate Karatsuba post-processing, improve ILP in
# reduction_alg9, increase reduction aggregate factor to 4x. As for
# the latter, ghash-x86.pl discusses why it makes less sense to
# increase the aggregate factor there. Then why increase it here? The
# critical path consists of 3 independent pclmulqdq instructions,
# Karatsuba post-processing and reduction. "On top" of this we lay
# down aggregated multiplication operations, triplets of independent
# pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
# less sense to aggregate more multiplications than it takes to
# perform the remaining non-multiplication operations. 2x is a
# near-optimal coefficient for contemporary Intel CPUs (hence the
# modest improvement coefficient), but not for Bulldozer. The latter
# is because logical SIMD operations are twice as slow there as on
# Intel, so the critical path is longer. A CPU with a higher pclmulqdq
# issue rate would also benefit from a higher aggregate factor...
#
# Westmere	1.78(+13%)
# Sandy Bridge	1.80(+8%)
# Ivy Bridge	1.80(+7%)
# Haswell	0.55(+93%) (if system doesn't support AVX)
# Broadwell	0.45(+110%)(if system doesn't support AVX)
# Skylake	0.44(+110%)(if system doesn't support AVX)
# Bulldozer	1.49(+27%)
# Silvermont	2.88(+13%)
# Goldmont	1.08(+24%)

# March 2013
#
# ... The 8x aggregate factor AVX code path uses the reduction algorithm
# suggested by Shay Gueron[1]. Even though contemporary AVX-capable
# CPUs such as Sandy and Ivy Bridge can execute it, the code performs
# sub-optimally in comparison to the above-mentioned version. But thanks
# to Ilya Albrekht and Max Locktyukhin of Intel Corp. we know that
# it performs at 0.41 cycles per byte on a Haswell processor, at
# 0.29 on Broadwell, and at 0.36 on Skylake.
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest

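# For reference while reading the code below: GHASH multiplies 128-bit blocks
# in GF(2^128) as defined for GCM. The hedged, illustrative sketch that
# follows is NOT used by this script or by the generated assembly
# (gf128_mul_ref is a name introduced here only for illustration); it follows
# the bit-by-bit definition from NIST SP 800-38D, whereas the "4-bit" table
# code and the PCLMULQDQ/AVX paths below compute the same product, only much
# faster.
use Math::BigInt;

sub gf128_mul_ref {
	my ($x,$y) = @_;	# 128-bit blocks as Math::BigInt, leftmost block bit = bit 127
	my $R = Math::BigInt->from_hex("e1")->blsft(120);	# reduction constant
	my $z = Math::BigInt->bzero();
	my $v = $y->copy();
	for my $i (reverse 0..127) {	# walk the bits of $x from left to right
		$z->bxor($v) if $x->copy()->brsft($i)->is_odd();
		my $carry = $v->is_odd();	# rightmost bit about to be shifted out
		$v->brsft(1);
		$v->bxor($R) if $carry;		# fold it back in modulo the polynomial
	}
	$z;
}
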
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# See the notes about |$avx| in aesni-gcm-x86_64.pl; otherwise tags will be
# computed incorrectly.
#
# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
$avx = 1;

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

$do4xaggr=1;

# common register layout
$nlo="%rax";
$nhi="%rbx";
$Zlo="%r8";
$Zhi="%r9";
$tmp="%r10";
$rem_4bit = "%r11";

$Xi="%rdi";
$Htbl="%rsi";

# per-function register layout
$cnt="%rcx";
$rem="%rdx";

sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/	or
	    $r =~ s/%[er]([sd]i)/%\1l/	or
	    $r =~ s/%[er](bp)/%\1l/	or
	    $r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }

sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}

{ my $N;
  sub loop() {
  my $inp = shift;

	$N++;
$code.=<<___;
	xor	$nlo,$nlo
	xor	$nhi,$nhi
	mov	`&LB("$Zlo")`,`&LB("$nlo")`
	mov	`&LB("$Zlo")`,`&LB("$nhi")`
	shl	\$4,`&LB("$nlo")`
	mov	\$14,$cnt
	mov	8($Htbl,$nlo),$Zlo
	mov	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	mov	$Zlo,$rem
	jmp	.Loop$N

.align	16
.Loop$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	mov	($inp,$cnt),`&LB("$nlo")`
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	mov	`&LB("$nlo")`,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	shl	\$4,`&LB("$nlo")`
	xor	$tmp,$Zlo
	dec	$cnt
	js	.Lbreak$N

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo
	jmp	.Loop$N

.align	16
.Lbreak$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	xor	$tmp,$Zlo
	xor	($rem_4bit,$rem,8),$Zhi

	bswap	$Zlo
	bswap	$Zhi
___
}}

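# The loop generated above shifts $Zlo:$Zhi right four bits at a time; the
# four bits that fall off the low end are folded back in through the
# .Lrem_4bit table defined near the bottom of this file. As a hedged
# illustration of where those constants come from (rem_4bit_ref is a
# throw-away name, nothing in this script calls it), each entry is the
# carry-less product of the dropped nibble with 0x1c20, positioned so that
# it lands in the top 16 bits of $Zhi:
sub rem_4bit_ref {
	my @tbl;
	for my $i (0..15) {
		my $v = 0;
		for my $b (0..3) {		# carry-less multiply $i * 0x1c20 over GF(2)
			$v ^= 0x1c20<<$b if ($i>>$b)&1;
		}
		push(@tbl,$v<<16);		# matches the `0x....<<16` .long entries below
	}
	@tbl;
}
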
$code=<<___;
.text
.extern	OPENSSL_ia32cap_P

.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,\@function,2
.align	16
gcm_gmult_4bit:
	push	%rbx
	push	%rbp		# %rbp and others are pushed exclusively in
	push	%r12		# order to reuse Win64 exception handler...
	push	%r13
	push	%r14
	push	%r15
	sub	\$280,%rsp
.Lgmult_prologue:

	movzb	15($Xi),$Zlo
	lea	.Lrem_4bit(%rip),$rem_4bit
___
	&loop	($Xi);
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280+48(%rsp),%rsi
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lgmult_epilogue:
	ret
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___

# per-function register layout
$inp="%rdx";
$len="%rcx";
$rem_8bit=$rem_4bit;

$code.=<<___;
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,\@function,4
.align	16
gcm_ghash_4bit:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	sub	\$280,%rsp
.Lghash_prologue:
	mov	$inp,%r14		# reassign couple of args
	mov	$len,%r15
___
{ my $inp="%r14";
  my $dat="%edx";
  my $len="%r15";
  my @nhi=("%ebx","%ecx");
  my @rem=("%r12","%r13");
  my $Hshr4="%rbp";

	&sub	($Htbl,-128);		# size optimization
	&lea	($Hshr4,"16+128(%rsp)");
	{ my @lo =($nlo,$nhi);
	  my @hi =($Zlo,$Zhi);

	  &xor	($dat,$dat);
	  for ($i=0,$j=-2;$i<18;$i++,$j++) {
	    &mov	("$j(%rsp)",&LB($dat))		if ($i>1);
	    &or		($lo[0],$tmp)			if ($i>1);
	    &mov	(&LB($dat),&LB($lo[1]))		if ($i>0 && $i<17);
	    &shr	($lo[1],4)			if ($i>0 && $i<17);
	    &mov	($tmp,$hi[1])			if ($i>0 && $i<17);
	    &shr	($hi[1],4)			if ($i>0 && $i<17);
	    &mov	("8*$j($Hshr4)",$hi[0])		if ($i>1);
	    &mov	($hi[0],"16*$i+0-128($Htbl)")	if ($i<16);
	    &shl	(&LB($dat),4)			if ($i>0 && $i<17);
	    &mov	("8*$j-128($Hshr4)",$lo[0])	if ($i>1);
	    &mov	($lo[0],"16*$i+8-128($Htbl)")	if ($i<16);
	    &shl	($tmp,60)			if ($i>0 && $i<17);

	    push	(@lo,shift(@lo));
	    push	(@hi,shift(@hi));
	  }
	}
	&add	($Htbl,-128);
	&mov	($Zlo,"8($Xi)");
	&mov	($Zhi,"0($Xi)");
	&add	($len,$inp);		# pointer to the end of data
	&lea	($rem_8bit,".Lrem_8bit(%rip)");
	&jmp	(".Louter_loop");

$code.=".align	16\n.Louter_loop:\n";
	&xor	($Zhi,"($inp)");
	&mov	("%rdx","8($inp)");
	&lea	($inp,"16($inp)");
	&xor	("%rdx",$Zlo);
	&mov	("($Xi)",$Zhi);
	&mov	("8($Xi)","%rdx");
	&shr	("%rdx",32);

	&xor	($nlo,$nlo);
	&rol	($dat,8);
	&mov	(&LB($nlo),&LB($dat));
	&movz	($nhi[0],&LB($dat));
	&shl	(&LB($nlo),4);
	&shr	($nhi[0],4);

	for ($j=11,$i=0;$i<15;$i++) {
	    &rol	($dat,8);
	    &xor	($Zlo,"8($Htbl,$nlo)")			if ($i>0);
	    &xor	($Zhi,"($Htbl,$nlo)")			if ($i>0);
	    &mov	($Zlo,"8($Htbl,$nlo)")			if ($i==0);
	    &mov	($Zhi,"($Htbl,$nlo)")			if ($i==0);

	    &mov	(&LB($nlo),&LB($dat));
	    &xor	($Zlo,$tmp)				if ($i>0);
	    &movzw	($rem[1],"($rem_8bit,$rem[1],2)")	if ($i>0);

	    &movz	($nhi[1],&LB($dat));
	    &shl	(&LB($nlo),4);
	    &movzb	($rem[0],"(%rsp,$nhi[0])");

	    &shr	($nhi[1],4)				if ($i<14);
	    &and	($nhi[1],0xf0)				if ($i==14);
	    &shl	($rem[1],48)				if ($i>0);
	    &xor	($rem[0],$Zlo);

	    &mov	($tmp,$Zhi);
	    &xor	($Zhi,$rem[1])				if ($i>0);
	    &shr	($Zlo,8);

	    &movz	($rem[0],&LB($rem[0]));
	    &mov	($dat,"$j($Xi)")			if (--$j%4==0);
	    &shr	($Zhi,8);

	    &xor	($Zlo,"-128($Hshr4,$nhi[0],8)");
	    &shl	($tmp,56);
	    &xor	($Zhi,"($Hshr4,$nhi[0],8)");

	    unshift	(@nhi,pop(@nhi));		# "rotate" registers
	    unshift	(@rem,pop(@rem));
	}
	&movzw	($rem[1],"($rem_8bit,$rem[1],2)");
	&xor	($Zlo,"8($Htbl,$nlo)");
	&xor	($Zhi,"($Htbl,$nlo)");

	&shl	($rem[1],48);
	&xor	($Zlo,$tmp);

	&xor	($Zhi,$rem[1]);
	&movz	($rem[0],&LB($Zlo));
	&shr	($Zlo,4);

	&mov	($tmp,$Zhi);
	&shl	(&LB($rem[0]),4);
	&shr	($Zhi,4);

	&xor	($Zlo,"8($Htbl,$nhi[0])");
	&movzw	($rem[0],"($rem_8bit,$rem[0],2)");
	&shl	($tmp,60);

	&xor	($Zhi,"($Htbl,$nhi[0])");
	&xor	($Zlo,$tmp);
	&shl	($rem[0],48);

	&bswap	($Zlo);
	&xor	($Zhi,$rem[0]);

	&bswap	($Zhi);
	&cmp	($inp,$len);
	&jb	(".Louter_loop");
}
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280+48(%rsp),%rsi
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	0(%rsi),%rsp
.Lghash_epilogue:
	ret
.size	gcm_ghash_4bit,.-gcm_ghash_4bit
___

######################################################################
# PCLMULQDQ version.

@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
		("%rdi","%rsi","%rdx","%rcx");	# Unix order

($Xi,$Xhi)=("%xmm0","%xmm1");	$Hkey="%xmm2";
($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");

sub clmul64x64_T2 {	# minimal register pressure
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pshufd		\$0b01001110,$Hkey,$T2
	pxor		$Xi,$T1			#
	pxor		$Hkey,$T2
___
} else {
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1			#
___
}
$code.=<<___;
	pclmulqdq	\$0x00,$Hkey,$Xi	#######
	pclmulqdq	\$0x11,$Hkey,$Xhi	#######
	pclmulqdq	\$0x00,$HK,$T1		#######
	pxor		$Xi,$T1			#
	pxor		$Xhi,$T1		#

	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
}

sub reduction_alg9 {	# 17/11 times faster than Intel version
my ($Xhi,$Xi) = @_;

$code.=<<___;
	# 1st phase
	movdqa		$Xi,$T2			#
	movdqa		$Xi,$T1
	psllq		\$5,$Xi
	pxor		$Xi,$T1			#
	psllq		\$1,$Xi
	pxor		$T1,$Xi			#
	psllq		\$57,$Xi		#
	movdqa		$Xi,$T1			#
	pslldq		\$8,$Xi
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pxor		$T1,$Xhi		#

	# 2nd phase
	movdqa		$Xi,$T2
	psrlq		\$1,$Xi
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi
	pxor		$T2,$Xi			#
	psrlq		\$1,$Xi			#
	pxor		$Xhi,$Xi		#
___
}

{ my ($Htbl,$Xip)=@_4args;
  my $HK="%xmm6";

$code.=<<___;
.globl	gcm_init_clmul
.type	gcm_init_clmul,\@abi-omnipotent
.align	16
gcm_init_clmul:
.L_init_clmul:
___
$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	movdqu		($Xip),$Hkey
	pshufd		\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	pshufd		\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	movdqa		$Hkey,$T1
	psllq		\$1,$Hkey
	pxor		$T3,$T3			#
	psrlq		\$63,$T1
	pcmpgtd		$T2,$T3			# broadcast carry bit
	pslldq		\$8,$T1
	por		$T1,$Hkey		# H<<=1

	# magic reduction
	pand		.L0x1c2_polynomial(%rip),$T3
	pxor		$T3,$Hkey		# if(carry) H^=0x1c2_polynomial

	# calculate H^2
	pshufd		\$0b01001110,$Hkey,$HK
	movdqa		$Hkey,$Xi
	pxor		$Hkey,$HK
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$Hkey,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$Hkey,$T1		# Karatsuba pre-processing
	movdqu		$Hkey,0x00($Htbl)	# save H
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x10($Htbl)		# save H^2
	palignr		\$8,$T1,$T2		# low part is H.lo^H.hi...
	movdqu		$T2,0x20($Htbl)		# save Karatsuba "salt"
___
if ($do4xaggr) {
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^3
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	movdqa		$Xi,$T3
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^4
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$T3,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$T3,$T1			# Karatsuba pre-processing
	movdqu		$T3,0x30($Htbl)		# save H^3
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x40($Htbl)		# save H^4
	palignr		\$8,$T1,$T2		# low part is H^3.lo^H^3.hi...
	movdqu		$T2,0x50($Htbl)		# save Karatsuba "salt"
___
}
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	lea	0x18(%rsp),%rsp
.LSEH_end_gcm_init_clmul:
___
$code.=<<___;
	ret
.size	gcm_init_clmul,.-gcm_init_clmul
___
}

{ my ($Xip,$Htbl)=@_4args;

$code.=<<___;
.globl	gcm_gmult_clmul
.type	gcm_gmult_clmul,\@abi-omnipotent
.align	16
gcm_gmult_clmul:
.L_gmult_clmul:
	movdqu		($Xip),$Xi
	movdqa		.Lbswap_mask(%rip),$T3
	movdqu		($Htbl),$Hkey
	movdqu		0x20($Htbl),$T2
	pshufb		$T3,$Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$T2);
$code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
	# experimental alternative. the special thing about it is that there
	# is no dependency between the two multiplications...
	mov	\$`0xE1<<1`,%eax
	mov	\$0xA040608020C0E000,%r10	# ((7..0)·0xE0)&0xff
	mov	\$0x07,%r11d
	movq	%rax,$T1
	movq	%r10,$T2
	movq	%r11,$T3		# borrow $T3
	pand	$Xi,$T3
	pshufb	$T3,$T2			# ($Xi&7)·0xE0
	movq	%rax,$T3
	pclmulqdq	\$0x00,$Xi,$T1	# ·(0xE1<<1)
	pxor	$Xi,$T2
	pslldq	\$15,$T2
	paddd	$T2,$T2			# <<(64+56+1)
	pxor	$T2,$Xi
	pclmulqdq	\$0x01,$T3,$Xi
	movdqa	.Lbswap_mask(%rip),$T3	# reload $T3
	psrldq	\$1,$T1
	pxor	$T1,$Xhi
	pslldq	\$7,$Xi
	pxor	$Xhi,$Xi
___
$code.=<<___;
	pshufb		$T3,$Xi
	movdqu		$Xi,($Xip)
	ret
.size	gcm_gmult_clmul,.-gcm_gmult_clmul
___
}

{ my ($Xip,$Htbl,$inp,$len)=@_4args;
  my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
  my ($T1,$T2,$T3)=map("%xmm$_",(8..10));

$code.=<<___;
.globl	gcm_ghash_clmul
.type	gcm_ghash_clmul,\@abi-omnipotent
.align	32
gcm_ghash_clmul:
.L_ghash_clmul:
___
$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	movdqa		.Lbswap_mask(%rip),$T3

	movdqu		($Xip),$Xi
	movdqu		($Htbl),$Hkey
	movdqu		0x20($Htbl),$HK
	pshufb		$T3,$Xi

	sub		\$0x10,$len
	jz		.Lodd_tail

	movdqu		0x10($Htbl),$Hkey2
___
if ($do4xaggr) {
my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));

$code.=<<___;
	leaq		OPENSSL_ia32cap_P(%rip),%rax
	mov		4(%rax),%eax
	cmp		\$0x30,$len
	jb		.Lskip4x

	and		\$`1<<26|1<<22`,%eax	# isolate MOVBE+XSAVE
	cmp		\$`1<<22`,%eax		# check for MOVBE without XSAVE
	je		.Lskip4x

	sub		\$0x30,$len
	mov		\$0xA040608020C0E000,%rax	# ((7..0)·0xE0)&0xff
	movdqu		0x30($Htbl),$Hkey3
	movdqu		0x40($Htbl),$Hkey4

	#######
	# Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
	#
	movdqu		0x30($inp),$Xln
	movdqu		0x20($inp),$Xl
	pshufb		$T3,$Xln
	pshufb		$T3,$Xl
	movdqa		$Xln,$Xhn
	pshufd		\$0b01001110,$Xln,$Xmn
	pxor		$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	movdqa		$Xl,$Xh
	pshufd		\$0b01001110,$Xl,$Xm
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey2,$Xl
	pclmulqdq	\$0x11,$Hkey2,$Xh
	pclmulqdq	\$0x10,$HK,$Xm
	xorps		$Xl,$Xln
	xorps		$Xh,$Xhn
	movups		0x50($Htbl),$HK
	xorps		$Xm,$Xmn

	movdqu		0x10($inp),$Xl
	movdqu		0($inp),$T1
	pshufb		$T3,$Xl
	pshufb		$T3,$T1
	movdqa		$Xl,$Xh
	pshufd		\$0b01001110,$Xl,$Xm
	pxor		$T1,$Xi
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	movdqa		$Xi,$Xhi
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1
	pclmulqdq	\$0x11,$Hkey3,$Xh
	pclmulqdq	\$0x00,$HK,$Xm
	xorps		$Xl,$Xln
	xorps		$Xh,$Xhn

	lea		0x40($inp),$inp
	sub		\$0x40,$len
	jc		.Ltail4x

	jmp		.Lmod4_loop
.align	32
.Lmod4_loop:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	xorps		$Xm,$Xmn
	movdqu		0x30($inp),$Xl
	pshufb		$T3,$Xl
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	xorps		$Xln,$Xi
	movdqu		0x20($inp),$Xln
	movdqa		$Xl,$Xh
	pclmulqdq	\$0x10,$HK,$T1
	pshufd		\$0b01001110,$Xl,$Xm
	xorps		$Xhn,$Xhi
	pxor		$Xl,$Xm
	pshufb		$T3,$Xln
	movups		0x20($Htbl),$HK
	xorps		$Xmn,$T1
	pclmulqdq	\$0x00,$Hkey,$Xl
	pshufd		\$0b01001110,$Xln,$Xmn

	pxor		$Xi,$T1			# aggregated Karatsuba post-processing
	movdqa		$Xln,$Xhn
	pxor		$Xhi,$T1		#
	pxor		$Xln,$Xmn
	movdqa		$T1,$T2			#
	pclmulqdq	\$0x11,$Hkey,$Xh
	pslldq		\$8,$T1
	psrldq		\$8,$T2			#
	pxor		$T1,$Xi
	movdqa		.L7_mask(%rip),$T1
	pxor		$T2,$Xhi		#
	movq		%rax,$T2

	pand		$Xi,$T1			# 1st phase
	pshufb		$T1,$T2			#
	pxor		$Xi,$T2			#
	pclmulqdq	\$0x00,$HK,$Xm
	psllq		\$57,$T2		#
	movdqa		$T2,$T1			#
	pslldq		\$8,$T2
	pclmulqdq	\$0x00,$Hkey2,$Xln
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pxor		$T1,$Xhi		#
	movdqu		0($inp),$T1

	movdqa		$Xi,$T2			# 2nd phase
	psrlq		\$1,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhn
	xorps		$Xl,$Xln
	movdqu		0x10($inp),$Xl
	pshufb		$T3,$Xl
	pclmulqdq	\$0x10,$HK,$Xmn
	xorps		$Xh,$Xhn
	movups		0x50($Htbl),$HK
	pshufb		$T3,$T1
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi

	movdqa		$Xl,$Xh
	pxor		$Xm,$Xmn
	pshufd		\$0b01001110,$Xl,$Xm
	pxor		$T2,$Xi			#
	pxor		$T1,$Xhi
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	psrlq		\$1,$Xi			#
	pxor		$Xhi,$Xi		#
	movdqa		$Xi,$Xhi
	pclmulqdq	\$0x11,$Hkey3,$Xh
	xorps		$Xl,$Xln
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1

	pclmulqdq	\$0x00,$HK,$Xm
	xorps		$Xh,$Xhn

	lea		0x40($inp),$inp
	sub		\$0x40,$len
	jnc		.Lmod4_loop

.Ltail4x:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	pclmulqdq	\$0x10,$HK,$T1
	xorps		$Xm,$Xmn
	xorps		$Xln,$Xi
	xorps		$Xhn,$Xhi
	pxor		$Xi,$Xhi		# aggregated Karatsuba post-processing
	pxor		$Xmn,$T1

	pxor		$Xhi,$T1		#
	pxor		$Xi,$Xhi

	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
	&reduction_alg9($Xhi,$Xi);
$code.=<<___;
	add		\$0x40,$len
	jz		.Ldone
	movdqu		0x20($Htbl),$HK
	sub		\$0x10,$len
	jz		.Lodd_tail
.Lskip4x:
___
}
$code.=<<___;
	#######
	# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
	#	[(H*Ii+1) + (H*Xi+1)] mod P =
	#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
	movdqu		($inp),$T1		# Ii
	movdqu		16($inp),$Xln		# Ii+1
	pshufb		$T3,$T1
	pshufb		$T3,$Xln
	pxor		$T1,$Xi			# Ii+Xi

	movdqa		$Xln,$Xhn
	pshufd		\$0b01001110,$Xln,$Xmn
	pxor		$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	lea		32($inp),$inp		# i+=2
	nop
	sub		\$0x20,$len
	jbe		.Leven_tail
	nop
	jmp		.Lmod_loop

.align	32
.Lmod_loop:
	movdqa		$Xi,$Xhi
	movdqa		$Xmn,$T1
	pshufd		\$0b01001110,$Xi,$Xmn	#
	pxor		$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor		$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor		$Xhn,$Xhi
	movdqu		($inp),$T2		# Ii
	pxor		$Xi,$T1			# aggregated Karatsuba post-processing
	pshufb		$T3,$T2
	movdqu		16($inp),$Xln		# Ii+1

	pxor		$Xhi,$T1
	pxor		$T2,$Xhi		# "Ii+Xi", consume early
	pxor		$T1,$Xmn
	pshufb		$T3,$Xln
	movdqa		$Xmn,$T1		#
	psrldq		\$8,$T1
	pslldq		\$8,$Xmn		#
	pxor		$T1,$Xhi
	pxor		$Xmn,$Xi		#

	movdqa		$Xln,$Xhn		#

	movdqa		$Xi,$T2			# 1st phase
	movdqa		$Xi,$T1
	psllq		\$5,$Xi
	pxor		$Xi,$T1			#
	pclmulqdq	\$0x00,$Hkey,$Xln	#######
	psllq		\$1,$Xi
	pxor		$T1,$Xi			#
	psllq		\$57,$Xi		#
	movdqa		$Xi,$T1			#
	pslldq		\$8,$Xi
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pshufd		\$0b01001110,$Xhn,$Xmn
	pxor		$T1,$Xhi		#
	pxor		$Xhn,$Xmn		#

	movdqa		$Xi,$T2			# 2nd phase
	psrlq		\$1,$Xi
	pclmulqdq	\$0x11,$Hkey,$Xhn	#######
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi
	pxor		$T2,$Xi			#
	lea		32($inp),$inp
	psrlq		\$1,$Xi			#
	pclmulqdq	\$0x00,$HK,$Xmn		#######
	pxor		$Xhi,$Xi		#

	sub		\$0x20,$len
	ja		.Lmod_loop

.Leven_tail:
	movdqa		$Xi,$Xhi
	movdqa		$Xmn,$T1
	pshufd		\$0b01001110,$Xi,$Xmn	#
	pxor		$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor		$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor		$Xhn,$Xhi
	pxor		$Xi,$T1
	pxor		$Xhi,$T1
	pxor		$T1,$Xmn
	movdqa		$Xmn,$T1		#
	psrldq		\$8,$T1
	pslldq		\$8,$Xmn		#
	pxor		$T1,$Xhi
	pxor		$Xmn,$Xi		#
___
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	test		$len,$len
	jnz		.Ldone

.Lodd_tail:
	movdqu		($inp),$T1		# Ii
	pshufb		$T3,$T1
	pxor		$T1,$Xi			# Ii+Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H*(Ii+Xi)
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
.Ldone:
	pshufb		$T3,$Xi
	movdqu		$Xi,($Xip)
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_clmul:
___
$code.=<<___;
	ret
.size	gcm_ghash_clmul,.-gcm_ghash_clmul
___
}

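# The same reference arithmetic can be used to sanity-check the algebra behind
# the aggregated schedule used above, e.g. the 2x identity
# Xi+2 = [(H*Ii+1) + H^2*(Ii+Xi)] mod P quoted in gcm_ghash_clmul. Hedged
# sketch only: check_2x_aggregation is a name made up here, it reuses the
# gf128_mul_ref sketch near the top of this file, and nothing calls it.
sub check_2x_aggregation {
	my ($H,$Xi,$I0,$I1) = @_;		# Math::BigInt blocks
	# two sequential GHASH steps: ((Xi+I0)*H + I1)*H
	my $seq = gf128_mul_ref(gf128_mul_ref($Xi->copy()->bxor($I0),$H)->bxor($I1),$H);
	# aggregated form: (Xi+I0)*H^2 + I1*H
	my $agg = gf128_mul_ref($Xi->copy()->bxor($I0),gf128_mul_ref($H,$H));
	$agg->bxor(gf128_mul_ref($I1,$H));
	return $seq->bcmp($agg)==0;		# true when both orderings agree
}
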
$code.=<<___;
.globl	gcm_init_avx
.type	gcm_init_avx,\@abi-omnipotent
.align	32
gcm_init_avx:
___
if ($avx) {
my ($Htbl,$Xip)=@_4args;
my $HK="%xmm6";

$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	vzeroupper

	vmovdqu		($Xip),$Hkey
	vpshufd		\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	vpshufd		\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	vpsrlq		\$63,$Hkey,$T1
	vpsllq		\$1,$Hkey,$Hkey
	vpxor		$T3,$T3,$T3		#
	vpcmpgtd	$T2,$T3,$T3		# broadcast carry bit
	vpslldq		\$8,$T1,$T1
	vpor		$T1,$Hkey,$Hkey		# H<<=1

	# magic reduction
	vpand		.L0x1c2_polynomial(%rip),$T3,$T3
	vpxor		$T3,$Hkey,$Hkey		# if(carry) H^=0x1c2_polynomial

	vpunpckhqdq	$Hkey,$Hkey,$HK
	vmovdqa		$Hkey,$Xi
	vpxor		$Hkey,$HK,$HK
	mov		\$4,%r10		# up to H^8
	jmp		.Linit_start_avx
___

sub clmul64x64_avx {
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpunpckhqdq	$Hkey,$Hkey,$T2
	vpxor		$Xi,$T1,$T1		#
	vpxor		$Hkey,$T2,$T2
___
} else {
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpxor		$Xi,$T1,$T1		#
___
}
$code.=<<___;
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xhi	#######
	vpclmulqdq	\$0x00,$Hkey,$Xi,$Xi	#######
	vpclmulqdq	\$0x00,$HK,$T1,$T1	#######
	vpxor		$Xi,$Xhi,$T2		#
	vpxor		$T2,$T1,$T1		#

	vpslldq		\$8,$T1,$T2		#
	vpsrldq		\$8,$T1,$T1
	vpxor		$T2,$Xi,$Xi		#
	vpxor		$T1,$Xhi,$Xhi
___
}

sub reduction_avx {
my ($Xhi,$Xi) = @_;

$code.=<<___;
	vpsllq		\$57,$Xi,$T1		# 1st phase
	vpsllq		\$62,$Xi,$T2
	vpxor		$T1,$T2,$T2		#
	vpsllq		\$63,$Xi,$T1
	vpxor		$T1,$T2,$T2		#
	vpslldq		\$8,$T2,$T1		#
	vpsrldq		\$8,$T2,$T2
	vpxor		$T1,$Xi,$Xi		#
	vpxor		$T2,$Xhi,$Xhi

	vpsrlq		\$1,$Xi,$T2		# 2nd phase
	vpxor		$Xi,$Xhi,$Xhi
	vpxor		$T2,$Xi,$Xi		#
	vpsrlq		\$5,$T2,$T2
	vpxor		$T2,$Xi,$Xi		#
	vpsrlq		\$1,$Xi,$Xi		#
	vpxor		$Xhi,$Xi,$Xi		#
___
}

$code.=<<___;
.align	32
.Linit_loop_avx:
	vpalignr	\$8,$T1,$T2,$T3		# low part is H.lo^H.hi...
	vmovdqu		$T3,-0x10($Htbl)	# save Karatsuba "salt"
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^3,5,7
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
.Linit_start_avx:
	vmovdqa		$Xi,$T3
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^2,4,6,8
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
	vpshufd		\$0b01001110,$T3,$T1
	vpshufd		\$0b01001110,$Xi,$T2
	vpxor		$T3,$T1,$T1		# Karatsuba pre-processing
	vmovdqu		$T3,0x00($Htbl)		# save H^1,3,5,7
	vpxor		$Xi,$T2,$T2		# Karatsuba pre-processing
	vmovdqu		$Xi,0x10($Htbl)		# save H^2,4,6,8
	lea		0x30($Htbl),$Htbl
	sub		\$1,%r10
	jnz		.Linit_loop_avx

	vpalignr	\$8,$T2,$T1,$T3		# last "salt" is flipped
	vmovdqu		$T3,-0x10($Htbl)

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	lea	0x18(%rsp),%rsp
.LSEH_end_gcm_init_avx:
___
$code.=<<___;
	ret
.size	gcm_init_avx,.-gcm_init_avx
___
} else {
$code.=<<___;
	jmp	.L_init_clmul
.size	gcm_init_avx,.-gcm_init_avx
___
}

$code.=<<___;
.globl	gcm_gmult_avx
.type	gcm_gmult_avx,\@abi-omnipotent
.align	32
gcm_gmult_avx:
	jmp	.L_gmult_clmul
.size	gcm_gmult_avx,.-gcm_gmult_avx
___

$code.=<<___;
.globl	gcm_ghash_avx
.type	gcm_ghash_avx,\@abi-omnipotent
.align	32
gcm_ghash_avx:
___
if ($avx) {
my ($Xip,$Htbl,$inp,$len)=@_4args;
my ($Xlo,$Xhi,$Xmi,
    $Zlo,$Zhi,$Zmi,
    $Hkey,$HK,$T1,$T2,
    $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));

$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	vzeroupper

	vmovdqu		($Xip),$Xi		# load $Xi
	lea		.L0x1c2_polynomial(%rip),%r10
	lea		0x40($Htbl),$Htbl	# size optimization
	vmovdqu		.Lbswap_mask(%rip),$bswap
	vpshufb		$bswap,$Xi,$Xi
	cmp		\$0x80,$len
	jb		.Lshort_avx
	sub		\$0x80,$len

	vmovdqu		0x70($inp),$Ii		# I[7]
	vmovdqu		0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpshufb		$bswap,$Ii,$Ii
	vmovdqu		0x20-0x40($Htbl),$HK

	vpunpckhqdq	$Ii,$Ii,$T2
	vmovdqu		0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Ii,$T2,$T2
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu		0x50($inp),$Ii		# I[5]
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor		$Ij,$T1,$T1

	vpshufb		$bswap,$Ii,$Ii
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpxor		$Ii,$T2,$T2
	vmovdqu		0x40($inp),$Ij		# I[4]
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu		0x50-0x40($Htbl),$HK

	vpshufb		$bswap,$Ij,$Ij
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor		$Ij,$T1,$T1

	vmovdqu		0x30($inp),$Ii		# I[3]
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor		$Zhi,$Xhi,$Xhi
	vpshufb		$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpxor		$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu		0x80-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2

	vmovdqu		0x20($inp),$Ij		# I[2]
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Xhi,$Zhi,$Zhi
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpxor		$Xmi,$Zmi,$Zmi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor		$Ij,$T1,$T1

	vmovdqu		0x10($inp),$Ii		# I[1]
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor		$Zhi,$Xhi,$Xhi
	vpshufb		$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpxor		$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu		0xb0-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2

	vmovdqu		($inp),$Ij		# I[0]
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Xhi,$Zhi,$Zhi
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x10,$HK,$T2,$Xmi

	lea		0x80($inp),$inp
	cmp		\$0x80,$len
	jb		.Ltail_avx

	vpxor		$Xi,$Ij,$Ij		# accumulate $Xi
	sub		\$0x80,$len
	jmp		.Loop8x_avx

.align	32
.Loop8x_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu		0x70($inp),$Ii		# I[7]
	vpxor		$Xlo,$Zlo,$Zlo
	vpxor		$Ij,$T1,$T1
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xi
	vpshufb		$bswap,$Ii,$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xo
	vmovdqu		0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Tred
	vmovdqu		0x20-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2

	vmovdqu		0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Zlo,$Xi,$Xi		# collect result
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vxorps		$Zhi,$Xo,$Xo
	vmovdqu		0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor		$Zmi,$Tred,$Tred
	vxorps		$Ij,$T1,$T1

	vmovdqu		0x50($inp),$Ii		# I[5]
	vpxor		$Xi,$Tred,$Tred		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor		$Xo,$Tred,$Tred
	vpslldq		\$8,$Tred,$T2
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vpsrldq		\$8,$Tred,$Tred
	vpxor		$T2, $Xi, $Xi
	vmovdqu		0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb		$bswap,$Ii,$Ii
	vxorps		$Tred,$Xo, $Xo
	vpxor		$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu		0x50-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2
	vpxor		$Xmi,$Zmi,$Zmi

	vmovdqu		0x40($inp),$Ij		# I[4]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 1st phase
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb		$bswap,$Ij,$Ij
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vxorps		$Ij,$T1,$T1
	vpxor		$Zmi,$Xmi,$Xmi

	vmovdqu		0x30($inp),$Ii		# I[3]
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb		$bswap,$Ii,$Ii
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu		0x80-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2
	vpxor		$Xmi,$Zmi,$Zmi

	vmovdqu		0x20($inp),$Ij		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb		$bswap,$Ij,$Ij
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor		$Ij,$T1,$T1
	vpxor		$Zmi,$Xmi,$Xmi
	vxorps		$Tred,$Xi,$Xi

	vmovdqu		0x10($inp),$Ii		# I[1]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 2nd phase
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb		$bswap,$Ii,$Ii
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vxorps		$Xo,$Tred,$Tred
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu		0xb0-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2
	vpxor		$Xmi,$Zmi,$Zmi

	vmovdqu		($inp),$Ij		# I[0]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor		$Tred,$Ij,$Ij
	vpclmulqdq	\$0x10,$HK, $T2,$Xmi
	vpxor		$Xi,$Ij,$Ij		# accumulate $Xi

	lea		0x80($inp),$inp
	sub		\$0x80,$len
	jnc		.Loop8x_avx

	add		\$0x80,$len
	jmp		.Ltail_no_xor_avx

.align	32
.Lshort_avx:
	vmovdqu		-0x10($inp,$len),$Ii	# very last word
	lea		($inp,$len),$inp
	vmovdqu		0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vmovdqu		0x20-0x40($Htbl),$HK
	vpshufb		$bswap,$Ii,$Ij

	vmovdqa		$Xlo,$Zlo		# subtle way to zero $Zlo,
	vmovdqa		$Xhi,$Zhi		# $Zhi and
	vmovdqa		$Xmi,$Zmi		# $Zmi
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x20($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq		\$8,$HK,$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x30($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu		0x50-0x40($Htbl),$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x40($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq		\$8,$HK,$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x50($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu		0x80-0x40($Htbl),$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x60($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq		\$8,$HK,$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x70($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovq		0xb8-0x40($Htbl),$HK
	sub		\$0x10,$len
	jmp		.Ltail_avx

.align	32
.Ltail_avx:
	vpxor		$Xi,$Ij,$Ij		# accumulate $Xi
.Ltail_no_xor_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vmovdqu		(%r10),$Tred

	vpxor		$Xlo,$Zlo,$Xi
	vpxor		$Xhi,$Zhi,$Xo
	vpxor		$Xmi,$Zmi,$Zmi

	vpxor		$Xi, $Zmi,$Zmi		# aggregated Karatsuba post-processing
	vpxor		$Xo, $Zmi,$Zmi
	vpslldq		\$8, $Zmi,$T2
	vpsrldq		\$8, $Zmi,$Zmi
	vpxor		$T2, $Xi, $Xi
	vpxor		$Zmi,$Xo, $Xo

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 1st phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor		$T2,$Xi,$Xi

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 2nd phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor		$Xo,$Xi,$Xi
	vpxor		$T2,$Xi,$Xi

	cmp		\$0,$len
	jne		.Lshort_avx

	vpshufb		$bswap,$Xi,$Xi
	vmovdqu		$Xi,($Xip)
	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_avx:
___
$code.=<<___;
	ret
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
$code.=<<___;
	jmp	.L_ghash_clmul
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
}

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
	.long	7,0,7,0
.L7_mask_poly:
	.long	7,0,`0xE1<<1`,0
.align	64
.type	.Lrem_4bit,\@object
.Lrem_4bit:
	.long	0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
	.long	0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
	.long	0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
	.long	0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
.type	.Lrem_8bit,\@object
.Lrem_8bit:
	.value	0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
	.value	0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
	.value	0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
	.value	0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
	.value	0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
	.value	0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
	.value	0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
	.value	0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
	.value	0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
	.value	0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
	.value	0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
	.value	0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
	.value	0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
	.value	0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
	.value	0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
	.value	0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
	.value	0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
	.value	0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
	.value	0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
	.value	0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
	.value	0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
	.value	0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
	.value	0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
	.value	0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
	.value	0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
	.value	0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
	.value	0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
	.value	0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
	.value	0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
	.value	0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
	.value	0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
	.value	0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE

.asciz	"GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue

	lea	48+280(%rax),%rax	# adjust "rsp"

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_gcm_gmult_4bit
	.rva	.LSEH_end_gcm_gmult_4bit
	.rva	.LSEH_info_gcm_gmult_4bit

	.rva	.LSEH_begin_gcm_ghash_4bit
	.rva	.LSEH_end_gcm_ghash_4bit
	.rva	.LSEH_info_gcm_ghash_4bit

	.rva	.LSEH_begin_gcm_init_clmul
	.rva	.LSEH_end_gcm_init_clmul
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_clmul
	.rva	.LSEH_end_gcm_ghash_clmul
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_gcm_init_avx
	.rva	.LSEH_end_gcm_init_avx
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_avx
	.rva	.LSEH_end_gcm_ghash_avx
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_gcm_gmult_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lgmult_prologue,.Lgmult_epilogue	# HandlerData
.LSEH_info_gcm_ghash_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lghash_prologue,.Lghash_epilogue	# HandlerData
.LSEH_info_gcm_init_clmul:
	.byte	0x01,0x08,0x03,0x00
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x22,0x00,0x00	#sub	rsp,0x18
.LSEH_info_gcm_ghash_clmul:
	.byte	0x01,0x33,0x16,0x00
	.byte	0x33,0xf8,0x09,0x00	#movaps	0x90(rsp),xmm15
	.byte	0x2e,0xe8,0x08,0x00	#movaps	0x80(rsp),xmm14
	.byte	0x29,0xd8,0x07,0x00	#movaps	0x70(rsp),xmm13
	.byte	0x24,0xc8,0x06,0x00	#movaps	0x60(rsp),xmm12
	.byte	0x1f,0xb8,0x05,0x00	#movaps	0x50(rsp),xmm11
	.byte	0x1a,0xa8,0x04,0x00	#movaps	0x40(rsp),xmm10
	.byte	0x15,0x98,0x03,0x00	#movaps	0x30(rsp),xmm9
	.byte	0x10,0x88,0x02,0x00	#movaps	0x20(rsp),xmm8
	.byte	0x0c,0x78,0x01,0x00	#movaps	0x10(rsp),xmm7
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x01,0x15,0x00	#sub	rsp,0xa8
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;