1#! /usr/bin/env perl 2# Copyright 1998-2020 The OpenSSL Project Authors. All Rights Reserved. 3# 4# Licensed under the OpenSSL license (the "License"). You may not use 5# this file except in compliance with the License. You can obtain a copy 6# in the file LICENSE in the source distribution or at 7# https://www.openssl.org/source/license.html 8 9 10# ==================================================================== 11# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL 12# project. The module is, however, dual licensed under OpenSSL and 13# CRYPTOGAMS licenses depending on where you obtain it. For further 14# details see http://www.openssl.org/~appro/cryptogams/. 15# ==================================================================== 16 17# "[Re]written" was achieved in two major overhauls. In 2004 BODY_* 18# functions were re-implemented to address P4 performance issue [see 19# commentary below], and in 2006 the rest was rewritten in order to 20# gain freedom to liberate licensing terms. 21 22# January, September 2004. 23# 24# It was noted that Intel IA-32 C compiler generates code which 25# performs ~30% *faster* on P4 CPU than original *hand-coded* 26# SHA1 assembler implementation. To address this problem (and 27# prove that humans are still better than machines:-), the 28# original code was overhauled, which resulted in following 29# performance changes: 30# 31# compared with original compared with Intel cc 32# assembler impl. generated code 33# Pentium -16% +48% 34# PIII/AMD +8% +16% 35# P4 +85%(!) +45% 36# 37# As you can see Pentium came out as looser:-( Yet I reckoned that 38# improvement on P4 outweighs the loss and incorporate this 39# re-tuned code to 0.9.7 and later. 40# ---------------------------------------------------------------- 41 42# August 2009. 43# 44# George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as 45# '(c&d) + (b&(c^d))', which allows to accumulate partial results 46# and lighten "pressure" on scratch registers. This resulted in 47# >12% performance improvement on contemporary AMD cores (with no 48# degradation on other CPUs:-). Also, the code was revised to maximize 49# "distance" between instructions producing input to 'lea' instruction 50# and the 'lea' instruction itself, which is essential for Intel Atom 51# core and resulted in ~15% improvement. 52 53# October 2010. 54# 55# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it 56# is to offload message schedule denoted by Wt in NIST specification, 57# or Xupdate in OpenSSL source, to SIMD unit. The idea is not novel, 58# and in SSE2 context was first explored by Dean Gaudet in 2004, see 59# http://arctic.org/~dean/crypto/sha1.html. Since then several things 60# have changed that made it interesting again: 61# 62# a) XMM units became faster and wider; 63# b) instruction set became more versatile; 64# c) an important observation was made by Max Locktykhin, which made 65# it possible to reduce amount of instructions required to perform 66# the operation in question, for further details see 67# http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/. 68 69# April 2011. 70# 71# Add AVX code path, probably most controversial... The thing is that 72# switch to AVX alone improves performance by as little as 4% in 73# comparison to SSSE3 code path. But below result doesn't look like 74# 4% improvement... Trouble is that Sandy Bridge decodes 'ro[rl]' as 75# pair of µ-ops, and it's the additional µ-ops, two per round, that 76# make it run slower than Core2 and Westmere. But 'sh[rl]d' is decoded 77# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with 78# equivalent 'sh[rl]d' that is responsible for the impressive 5.1 79# cycles per processed byte. But 'sh[rl]d' is not something that used 80# to be fast, nor does it appear to be fast in upcoming Bulldozer 81# [according to its optimization manual]. Which is why AVX code path 82# is guarded by *both* AVX and synthetic bit denoting Intel CPUs. 83# One can argue that it's unfair to AMD, but without 'sh[rl]d' it 84# makes no sense to keep the AVX code path. If somebody feels that 85# strongly, it's probably more appropriate to discuss possibility of 86# using vector rotate XOP on AMD... 87 88# March 2014. 89# 90# Add support for Intel SHA Extensions. 91 92###################################################################### 93# Current performance is summarized in following table. Numbers are 94# CPU clock cycles spent to process single byte (less is better). 95# 96# x86 SSSE3 AVX 97# Pentium 15.7 - 98# PIII 11.5 - 99# P4 10.6 - 100# AMD K8 7.1 - 101# Core2 7.3 6.0/+22% - 102# Westmere 7.3 5.5/+33% - 103# Sandy Bridge 8.8 6.2/+40% 5.1(**)/+73% 104# Ivy Bridge 7.2 4.8/+51% 4.7(**)/+53% 105# Haswell 6.5 4.3/+51% 4.1(**)/+58% 106# Skylake 6.4 4.1/+55% 4.1(**)/+55% 107# Bulldozer 11.6 6.0/+92% 108# VIA Nano 10.6 7.5/+41% 109# Atom 12.5 9.3(*)/+35% 110# Silvermont 14.5 9.9(*)/+46% 111# Goldmont 8.8 6.7/+30% 1.7(***)/+415% 112# 113# (*) Loop is 1056 instructions long and expected result is ~8.25. 114# The discrepancy is because of front-end limitations, so 115# called MS-ROM penalties, and on Silvermont even rotate's 116# limited parallelism. 117# 118# (**) As per above comment, the result is for AVX *plus* sh[rl]d. 119# 120# (***) SHAEXT result 121 122$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; 123push(@INC,"${dir}","${dir}../../perlasm"); 124require "x86asm.pl"; 125 126$output=pop; 127open STDOUT,">$output"; 128 129&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386"); 130 131$xmm=$ymm=0; 132for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); } 133 134$ymm=1 if ($xmm && 135 `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` 136 =~ /GNU assembler version ([2-9]\.[0-9]+)/ && 137 $1>=2.19); # first version supporting AVX 138 139$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" && 140 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ && 141 $1>=2.03); # first version supporting AVX 142 143$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" && 144 `ml 2>&1` =~ /Version ([0-9]+)\./ && 145 $1>=10); # first version supporting AVX 146 147$ymm=1 if ($xmm && !$ymm && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|based on LLVM) ([0-9]+\.[0-9]+)/ && 148 $2>=3.0); # first version supporting AVX 149 150$shaext=$xmm; ### set to zero if compiling for 1.0.1 151 152&external_label("OPENSSL_ia32cap_P") if ($xmm); 153 154 155$A="eax"; 156$B="ebx"; 157$C="ecx"; 158$D="edx"; 159$E="edi"; 160$T="esi"; 161$tmp1="ebp"; 162 163@V=($A,$B,$C,$D,$E,$T); 164 165$alt=0; # 1 denotes alternative IALU implementation, which performs 166 # 8% *worse* on P4, same on Westmere and Atom, 2% better on 167 # Sandy Bridge... 168 169sub BODY_00_15 170 { 171 local($n,$a,$b,$c,$d,$e,$f)=@_; 172 173 &comment("00_15 $n"); 174 175 &mov($f,$c); # f to hold F_00_19(b,c,d) 176 if ($n==0) { &mov($tmp1,$a); } 177 else { &mov($a,$tmp1); } 178 &rotl($tmp1,5); # tmp1=ROTATE(a,5) 179 &xor($f,$d); 180 &add($tmp1,$e); # tmp1+=e; 181 &mov($e,&swtmp($n%16)); # e becomes volatile and is loaded 182 # with xi, also note that e becomes 183 # f in next round... 184 &and($f,$b); 185 &rotr($b,2); # b=ROTATE(b,30) 186 &xor($f,$d); # f holds F_00_19(b,c,d) 187 &lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi 188 189 if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round 190 &add($f,$tmp1); } # f+=tmp1 191 else { &add($tmp1,$f); } # f becomes a in next round 192 &mov($tmp1,$a) if ($alt && $n==15); 193 } 194 195sub BODY_16_19 196 { 197 local($n,$a,$b,$c,$d,$e,$f)=@_; 198 199 &comment("16_19 $n"); 200 201if ($alt) { 202 &xor($c,$d); 203 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) 204 &and($tmp1,$c); # tmp1 to hold F_00_19(b,c,d), b&=c^d 205 &xor($f,&swtmp(($n+8)%16)); 206 &xor($tmp1,$d); # tmp1=F_00_19(b,c,d) 207 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd 208 &rotl($f,1); # f=ROTATE(f,1) 209 &add($e,$tmp1); # e+=F_00_19(b,c,d) 210 &xor($c,$d); # restore $c 211 &mov($tmp1,$a); # b in next round 212 &rotr($b,$n==16?2:7); # b=ROTATE(b,30) 213 &mov(&swtmp($n%16),$f); # xi=f 214 &rotl($a,5); # ROTATE(a,5) 215 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e 216 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round 217 &add($f,$a); # f+=ROTATE(a,5) 218} else { 219 &mov($tmp1,$c); # tmp1 to hold F_00_19(b,c,d) 220 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) 221 &xor($tmp1,$d); 222 &xor($f,&swtmp(($n+8)%16)); 223 &and($tmp1,$b); 224 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd 225 &rotl($f,1); # f=ROTATE(f,1) 226 &xor($tmp1,$d); # tmp1=F_00_19(b,c,d) 227 &add($e,$tmp1); # e+=F_00_19(b,c,d) 228 &mov($tmp1,$a); 229 &rotr($b,2); # b=ROTATE(b,30) 230 &mov(&swtmp($n%16),$f); # xi=f 231 &rotl($tmp1,5); # ROTATE(a,5) 232 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e 233 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round 234 &add($f,$tmp1); # f+=ROTATE(a,5) 235} 236 } 237 238sub BODY_20_39 239 { 240 local($n,$a,$b,$c,$d,$e,$f)=@_; 241 local $K=($n<40)?0x6ed9eba1:0xca62c1d6; 242 243 &comment("20_39 $n"); 244 245if ($alt) { 246 &xor($tmp1,$c); # tmp1 to hold F_20_39(b,c,d), b^=c 247 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) 248 &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d) 249 &xor($f,&swtmp(($n+8)%16)); 250 &add($e,$tmp1); # e+=F_20_39(b,c,d) 251 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd 252 &rotl($f,1); # f=ROTATE(f,1) 253 &mov($tmp1,$a); # b in next round 254 &rotr($b,7); # b=ROTATE(b,30) 255 &mov(&swtmp($n%16),$f) if($n<77);# xi=f 256 &rotl($a,5); # ROTATE(a,5) 257 &xor($b,$c) if($n==39);# warm up for BODY_40_59 258 &and($tmp1,$b) if($n==39); 259 &lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY 260 &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round 261 &add($f,$a); # f+=ROTATE(a,5) 262 &rotr($a,5) if ($n==79); 263} else { 264 &mov($tmp1,$b); # tmp1 to hold F_20_39(b,c,d) 265 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) 266 &xor($tmp1,$c); 267 &xor($f,&swtmp(($n+8)%16)); 268 &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d) 269 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd 270 &rotl($f,1); # f=ROTATE(f,1) 271 &add($e,$tmp1); # e+=F_20_39(b,c,d) 272 &rotr($b,2); # b=ROTATE(b,30) 273 &mov($tmp1,$a); 274 &rotl($tmp1,5); # ROTATE(a,5) 275 &mov(&swtmp($n%16),$f) if($n<77);# xi=f 276 &lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY 277 &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round 278 &add($f,$tmp1); # f+=ROTATE(a,5) 279} 280 } 281 282sub BODY_40_59 283 { 284 local($n,$a,$b,$c,$d,$e,$f)=@_; 285 286 &comment("40_59 $n"); 287 288if ($alt) { 289 &add($e,$tmp1); # e+=b&(c^d) 290 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) 291 &mov($tmp1,$d); 292 &xor($f,&swtmp(($n+8)%16)); 293 &xor($c,$d); # restore $c 294 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd 295 &rotl($f,1); # f=ROTATE(f,1) 296 &and($tmp1,$c); 297 &rotr($b,7); # b=ROTATE(b,30) 298 &add($e,$tmp1); # e+=c&d 299 &mov($tmp1,$a); # b in next round 300 &mov(&swtmp($n%16),$f); # xi=f 301 &rotl($a,5); # ROTATE(a,5) 302 &xor($b,$c) if ($n<59); 303 &and($tmp1,$b) if ($n<59);# tmp1 to hold F_40_59(b,c,d) 304 &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d)) 305 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round 306 &add($f,$a); # f+=ROTATE(a,5) 307} else { 308 &mov($tmp1,$c); # tmp1 to hold F_40_59(b,c,d) 309 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) 310 &xor($tmp1,$d); 311 &xor($f,&swtmp(($n+8)%16)); 312 &and($tmp1,$b); 313 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd 314 &rotl($f,1); # f=ROTATE(f,1) 315 &add($tmp1,$e); # b&(c^d)+=e 316 &rotr($b,2); # b=ROTATE(b,30) 317 &mov($e,$a); # e becomes volatile 318 &rotl($e,5); # ROTATE(a,5) 319 &mov(&swtmp($n%16),$f); # xi=f 320 &lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d)) 321 &mov($tmp1,$c); 322 &add($f,$e); # f+=ROTATE(a,5) 323 &and($tmp1,$d); 324 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round 325 &add($f,$tmp1); # f+=c&d 326} 327 } 328 329&function_begin("sha1_block_data_order"); 330if ($xmm) { 331 &static_label("shaext_shortcut") if ($shaext); 332 &static_label("ssse3_shortcut"); 333 &static_label("avx_shortcut") if ($ymm); 334 &static_label("K_XX_XX"); 335 336 &call (&label("pic_point")); # make it PIC! 337 &set_label("pic_point"); 338 &blindpop($tmp1); 339 &picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point")); 340 &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1)); 341 342 &mov ($A,&DWP(0,$T)); 343 &mov ($D,&DWP(4,$T)); 344 &test ($D,1<<9); # check SSSE3 bit 345 &jz (&label("x86")); 346 &mov ($C,&DWP(8,$T)); 347 &test ($A,1<<24); # check FXSR bit 348 &jz (&label("x86")); 349 if ($shaext) { 350 &test ($C,1<<29); # check SHA bit 351 &jnz (&label("shaext_shortcut")); 352 } 353 if ($ymm) { 354 &and ($D,1<<28); # mask AVX bit 355 &and ($A,1<<30); # mask "Intel CPU" bit 356 &or ($A,$D); 357 &cmp ($A,1<<28|1<<30); 358 &je (&label("avx_shortcut")); 359 } 360 &jmp (&label("ssse3_shortcut")); 361 &set_label("x86",16); 362} 363 &mov($tmp1,&wparam(0)); # SHA_CTX *c 364 &mov($T,&wparam(1)); # const void *input 365 &mov($A,&wparam(2)); # size_t num 366 &stack_push(16+3); # allocate X[16] 367 &shl($A,6); 368 &add($A,$T); 369 &mov(&wparam(2),$A); # pointer beyond the end of input 370 &mov($E,&DWP(16,$tmp1));# pre-load E 371 &jmp(&label("loop")); 372 373&set_label("loop",16); 374 375 # copy input chunk to X, but reversing byte order! 376 for ($i=0; $i<16; $i+=4) 377 { 378 &mov($A,&DWP(4*($i+0),$T)); 379 &mov($B,&DWP(4*($i+1),$T)); 380 &mov($C,&DWP(4*($i+2),$T)); 381 &mov($D,&DWP(4*($i+3),$T)); 382 &bswap($A); 383 &bswap($B); 384 &bswap($C); 385 &bswap($D); 386 &mov(&swtmp($i+0),$A); 387 &mov(&swtmp($i+1),$B); 388 &mov(&swtmp($i+2),$C); 389 &mov(&swtmp($i+3),$D); 390 } 391 &mov(&wparam(1),$T); # redundant in 1st spin 392 393 &mov($A,&DWP(0,$tmp1)); # load SHA_CTX 394 &mov($B,&DWP(4,$tmp1)); 395 &mov($C,&DWP(8,$tmp1)); 396 &mov($D,&DWP(12,$tmp1)); 397 # E is pre-loaded 398 399 for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); } 400 for(;$i<20;$i++) { &BODY_16_19($i,@V); unshift(@V,pop(@V)); } 401 for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } 402 for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } 403 for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } 404 405 (($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check 406 407 &mov($tmp1,&wparam(0)); # re-load SHA_CTX* 408 &mov($D,&wparam(1)); # D is last "T" and is discarded 409 410 &add($E,&DWP(0,$tmp1)); # E is last "A"... 411 &add($T,&DWP(4,$tmp1)); 412 &add($A,&DWP(8,$tmp1)); 413 &add($B,&DWP(12,$tmp1)); 414 &add($C,&DWP(16,$tmp1)); 415 416 &mov(&DWP(0,$tmp1),$E); # update SHA_CTX 417 &add($D,64); # advance input pointer 418 &mov(&DWP(4,$tmp1),$T); 419 &cmp($D,&wparam(2)); # have we reached the end yet? 420 &mov(&DWP(8,$tmp1),$A); 421 &mov($E,$C); # C is last "E" which needs to be "pre-loaded" 422 &mov(&DWP(12,$tmp1),$B); 423 &mov($T,$D); # input pointer 424 &mov(&DWP(16,$tmp1),$C); 425 &jb(&label("loop")); 426 427 &stack_pop(16+3); 428&function_end("sha1_block_data_order"); 429 430if ($xmm) { 431if ($shaext) { 432###################################################################### 433# Intel SHA Extensions implementation of SHA1 update function. 434# 435my ($ctx,$inp,$num)=("edi","esi","ecx"); 436my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3)); 437my @MSG=map("xmm$_",(4..7)); 438 439sub sha1rnds4 { 440 my ($dst,$src,$imm)=@_; 441 if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/) 442 { &data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm); } 443} 444sub sha1op38 { 445 my ($opcodelet,$dst,$src)=@_; 446 if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/) 447 { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); } 448} 449sub sha1nexte { sha1op38(0xc8,@_); } 450sub sha1msg1 { sha1op38(0xc9,@_); } 451sub sha1msg2 { sha1op38(0xca,@_); } 452 453&function_begin("_sha1_block_data_order_shaext"); 454 &call (&label("pic_point")); # make it PIC! 455 &set_label("pic_point"); 456 &blindpop($tmp1); 457 &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1)); 458&set_label("shaext_shortcut"); 459 &mov ($ctx,&wparam(0)); 460 &mov ("ebx","esp"); 461 &mov ($inp,&wparam(1)); 462 &mov ($num,&wparam(2)); 463 &sub ("esp",32); 464 465 &movdqu ($ABCD,&QWP(0,$ctx)); 466 &movd ($E,&DWP(16,$ctx)); 467 &and ("esp",-32); 468 &movdqa ($BSWAP,&QWP(0x50,$tmp1)); # byte-n-word swap 469 470 &movdqu (@MSG[0],&QWP(0,$inp)); 471 &pshufd ($ABCD,$ABCD,0b00011011); # flip word order 472 &movdqu (@MSG[1],&QWP(0x10,$inp)); 473 &pshufd ($E,$E,0b00011011); # flip word order 474 &movdqu (@MSG[2],&QWP(0x20,$inp)); 475 &pshufb (@MSG[0],$BSWAP); 476 &movdqu (@MSG[3],&QWP(0x30,$inp)); 477 &pshufb (@MSG[1],$BSWAP); 478 &pshufb (@MSG[2],$BSWAP); 479 &pshufb (@MSG[3],$BSWAP); 480 &jmp (&label("loop_shaext")); 481 482&set_label("loop_shaext",16); 483 &dec ($num); 484 &lea ("eax",&DWP(0x40,$inp)); 485 &movdqa (&QWP(0,"esp"),$E); # offload $E 486 &paddd ($E,@MSG[0]); 487 &cmovne ($inp,"eax"); 488 &movdqa (&QWP(16,"esp"),$ABCD); # offload $ABCD 489 490for($i=0;$i<20-4;$i+=2) { 491 &sha1msg1 (@MSG[0],@MSG[1]); 492 &movdqa ($E_,$ABCD); 493 &sha1rnds4 ($ABCD,$E,int($i/5)); # 0-3... 494 &sha1nexte ($E_,@MSG[1]); 495 &pxor (@MSG[0],@MSG[2]); 496 &sha1msg1 (@MSG[1],@MSG[2]); 497 &sha1msg2 (@MSG[0],@MSG[3]); 498 499 &movdqa ($E,$ABCD); 500 &sha1rnds4 ($ABCD,$E_,int(($i+1)/5)); 501 &sha1nexte ($E,@MSG[2]); 502 &pxor (@MSG[1],@MSG[3]); 503 &sha1msg2 (@MSG[1],@MSG[0]); 504 505 push(@MSG,shift(@MSG)); push(@MSG,shift(@MSG)); 506} 507 &movdqu (@MSG[0],&QWP(0,$inp)); 508 &movdqa ($E_,$ABCD); 509 &sha1rnds4 ($ABCD,$E,3); # 64-67 510 &sha1nexte ($E_,@MSG[1]); 511 &movdqu (@MSG[1],&QWP(0x10,$inp)); 512 &pshufb (@MSG[0],$BSWAP); 513 514 &movdqa ($E,$ABCD); 515 &sha1rnds4 ($ABCD,$E_,3); # 68-71 516 &sha1nexte ($E,@MSG[2]); 517 &movdqu (@MSG[2],&QWP(0x20,$inp)); 518 &pshufb (@MSG[1],$BSWAP); 519 520 &movdqa ($E_,$ABCD); 521 &sha1rnds4 ($ABCD,$E,3); # 72-75 522 &sha1nexte ($E_,@MSG[3]); 523 &movdqu (@MSG[3],&QWP(0x30,$inp)); 524 &pshufb (@MSG[2],$BSWAP); 525 526 &movdqa ($E,$ABCD); 527 &sha1rnds4 ($ABCD,$E_,3); # 76-79 528 &movdqa ($E_,&QWP(0,"esp")); 529 &pshufb (@MSG[3],$BSWAP); 530 &sha1nexte ($E,$E_); 531 &paddd ($ABCD,&QWP(16,"esp")); 532 533 &jnz (&label("loop_shaext")); 534 535 &pshufd ($ABCD,$ABCD,0b00011011); 536 &pshufd ($E,$E,0b00011011); 537 &movdqu (&QWP(0,$ctx),$ABCD) 538 &movd (&DWP(16,$ctx),$E); 539 &mov ("esp","ebx"); 540&function_end("_sha1_block_data_order_shaext"); 541} 542###################################################################### 543# The SSSE3 implementation. 544# 545# %xmm[0-7] are used as ring @X[] buffer containing quadruples of last 546# 32 elements of the message schedule or Xupdate outputs. First 4 547# quadruples are simply byte-swapped input, next 4 are calculated 548# according to method originally suggested by Dean Gaudet (modulo 549# being implemented in SSSE3). Once 8 quadruples or 32 elements are 550# collected, it switches to routine proposed by Max Locktyukhin. 551# 552# Calculations inevitably require temporary registers, and there are 553# no %xmm registers left to spare. For this reason part of the ring 554# buffer, X[2..4] to be specific, is offloaded to 3 quadriples ring 555# buffer on the stack. Keep in mind that X[2] is alias X[-6], X[3] - 556# X[-5], and X[4] - X[-4]... 557# 558# Another notable optimization is aggressive stack frame compression 559# aiming to minimize amount of 9-byte instructions... 560# 561# Yet another notable optimization is "jumping" $B variable. It means 562# that there is no register permanently allocated for $B value. This 563# allowed to eliminate one instruction from body_20_39... 564# 565my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded 566my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4 567my @V=($A,$B,$C,$D,$E); 568my $j=0; # hash round 569my $rx=0; 570my @T=($T,$tmp1); 571my $inp; 572 573my $_rol=sub { &rol(@_) }; 574my $_ror=sub { &ror(@_) }; 575 576&function_begin("_sha1_block_data_order_ssse3"); 577 &call (&label("pic_point")); # make it PIC! 578 &set_label("pic_point"); 579 &blindpop($tmp1); 580 &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1)); 581&set_label("ssse3_shortcut"); 582 583 &movdqa (@X[3],&QWP(0,$tmp1)); # K_00_19 584 &movdqa (@X[4],&QWP(16,$tmp1)); # K_20_39 585 &movdqa (@X[5],&QWP(32,$tmp1)); # K_40_59 586 &movdqa (@X[6],&QWP(48,$tmp1)); # K_60_79 587 &movdqa (@X[2],&QWP(64,$tmp1)); # pbswap mask 588 589 &mov ($E,&wparam(0)); # load argument block 590 &mov ($inp=@T[1],&wparam(1)); 591 &mov ($D,&wparam(2)); 592 &mov (@T[0],"esp"); 593 594 # stack frame layout 595 # 596 # +0 X[0]+K X[1]+K X[2]+K X[3]+K # XMM->IALU xfer area 597 # X[4]+K X[5]+K X[6]+K X[7]+K 598 # X[8]+K X[9]+K X[10]+K X[11]+K 599 # X[12]+K X[13]+K X[14]+K X[15]+K 600 # 601 # +64 X[0] X[1] X[2] X[3] # XMM->XMM backtrace area 602 # X[4] X[5] X[6] X[7] 603 # X[8] X[9] X[10] X[11] # even borrowed for K_00_19 604 # 605 # +112 K_20_39 K_20_39 K_20_39 K_20_39 # constants 606 # K_40_59 K_40_59 K_40_59 K_40_59 607 # K_60_79 K_60_79 K_60_79 K_60_79 608 # K_00_19 K_00_19 K_00_19 K_00_19 609 # pbswap mask 610 # 611 # +192 ctx # argument block 612 # +196 inp 613 # +200 end 614 # +204 esp 615 &sub ("esp",208); 616 &and ("esp",-64); 617 618 &movdqa (&QWP(112+0,"esp"),@X[4]); # copy constants 619 &movdqa (&QWP(112+16,"esp"),@X[5]); 620 &movdqa (&QWP(112+32,"esp"),@X[6]); 621 &shl ($D,6); # len*64 622 &movdqa (&QWP(112+48,"esp"),@X[3]); 623 &add ($D,$inp); # end of input 624 &movdqa (&QWP(112+64,"esp"),@X[2]); 625 &add ($inp,64); 626 &mov (&DWP(192+0,"esp"),$E); # save argument block 627 &mov (&DWP(192+4,"esp"),$inp); 628 &mov (&DWP(192+8,"esp"),$D); 629 &mov (&DWP(192+12,"esp"),@T[0]); # save original %esp 630 631 &mov ($A,&DWP(0,$E)); # load context 632 &mov ($B,&DWP(4,$E)); 633 &mov ($C,&DWP(8,$E)); 634 &mov ($D,&DWP(12,$E)); 635 &mov ($E,&DWP(16,$E)); 636 &mov (@T[0],$B); # magic seed 637 638 &movdqu (@X[-4&7],&QWP(-64,$inp)); # load input to %xmm[0-3] 639 &movdqu (@X[-3&7],&QWP(-48,$inp)); 640 &movdqu (@X[-2&7],&QWP(-32,$inp)); 641 &movdqu (@X[-1&7],&QWP(-16,$inp)); 642 &pshufb (@X[-4&7],@X[2]); # byte swap 643 &pshufb (@X[-3&7],@X[2]); 644 &pshufb (@X[-2&7],@X[2]); 645 &movdqa (&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot 646 &pshufb (@X[-1&7],@X[2]); 647 &paddd (@X[-4&7],@X[3]); # add K_00_19 648 &paddd (@X[-3&7],@X[3]); 649 &paddd (@X[-2&7],@X[3]); 650 &movdqa (&QWP(0,"esp"),@X[-4&7]); # X[]+K xfer to IALU 651 &psubd (@X[-4&7],@X[3]); # restore X[] 652 &movdqa (&QWP(0+16,"esp"),@X[-3&7]); 653 &psubd (@X[-3&7],@X[3]); 654 &movdqa (&QWP(0+32,"esp"),@X[-2&7]); 655 &mov (@T[1],$C); 656 &psubd (@X[-2&7],@X[3]); 657 &xor (@T[1],$D); 658 &pshufd (@X[0],@X[-4&7],0xee); # was &movdqa (@X[0],@X[-3&7]); 659 &and (@T[0],@T[1]); 660 &jmp (&label("loop")); 661 662###################################################################### 663# SSE instruction sequence is first broken to groups of independent 664# instructions, independent in respect to their inputs and shifter 665# (not all architectures have more than one). Then IALU instructions 666# are "knitted in" between the SSE groups. Distance is maintained for 667# SSE latency of 2 in hope that it fits better upcoming AMD Bulldozer 668# [which allegedly also implements SSSE3]... 669# 670# Temporary registers usage. X[2] is volatile at the entry and at the 671# end is restored from backtrace ring buffer. X[3] is expected to 672# contain current K_XX_XX constant and is used to calculate X[-1]+K 673# from previous round, it becomes volatile the moment the value is 674# saved to stack for transfer to IALU. X[4] becomes volatile whenever 675# X[-4] is accumulated and offloaded to backtrace ring buffer, at the 676# end it is loaded with next K_XX_XX [which becomes X[3] in next 677# round]... 678# 679sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4 680{ use integer; 681 my $body = shift; 682 my @insns = (&$body,&$body,&$body,&$body); # 40 instructions 683 my ($a,$b,$c,$d,$e); 684 685 eval(shift(@insns)); # ror 686 eval(shift(@insns)); 687 eval(shift(@insns)); 688 &punpcklqdq(@X[0],@X[-3&7]); # compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8); 689 &movdqa (@X[2],@X[-1&7]); 690 eval(shift(@insns)); 691 eval(shift(@insns)); 692 693 &paddd (@X[3],@X[-1&7]); 694 &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer 695 eval(shift(@insns)); # rol 696 eval(shift(@insns)); 697 &psrldq (@X[2],4); # "X[-3]", 3 dwords 698 eval(shift(@insns)); 699 eval(shift(@insns)); 700 &pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]" 701 eval(shift(@insns)); 702 eval(shift(@insns)); # ror 703 704 &pxor (@X[2],@X[-2&7]); # "X[-3]"^"X[-8]" 705 eval(shift(@insns)); 706 eval(shift(@insns)); 707 eval(shift(@insns)); 708 709 &pxor (@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]" 710 eval(shift(@insns)); 711 eval(shift(@insns)); # rol 712 &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU 713 eval(shift(@insns)); 714 eval(shift(@insns)); 715 716 &movdqa (@X[4],@X[0]); 717 eval(shift(@insns)); 718 eval(shift(@insns)); 719 eval(shift(@insns)); # ror 720 &movdqa (@X[2],@X[0]); 721 eval(shift(@insns)); 722 723 &pslldq (@X[4],12); # "X[0]"<<96, extract one dword 724 &paddd (@X[0],@X[0]); 725 eval(shift(@insns)); 726 eval(shift(@insns)); 727 728 &psrld (@X[2],31); 729 eval(shift(@insns)); 730 eval(shift(@insns)); # rol 731 &movdqa (@X[3],@X[4]); 732 eval(shift(@insns)); 733 eval(shift(@insns)); 734 eval(shift(@insns)); 735 736 &psrld (@X[4],30); 737 eval(shift(@insns)); 738 eval(shift(@insns)); # ror 739 &por (@X[0],@X[2]); # "X[0]"<<<=1 740 eval(shift(@insns)); 741 &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer 742 eval(shift(@insns)); 743 eval(shift(@insns)); 744 745 &pslld (@X[3],2); 746 eval(shift(@insns)); 747 eval(shift(@insns)); # rol 748 &pxor (@X[0],@X[4]); 749 &movdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX 750 eval(shift(@insns)); 751 eval(shift(@insns)); 752 753 &pxor (@X[0],@X[3]); # "X[0]"^=("X[0]"<<96)<<<2 754 &pshufd (@X[1],@X[-3&7],0xee) if ($Xi<7); # was &movdqa (@X[1],@X[-2&7]) 755 &pshufd (@X[3],@X[-1&7],0xee) if ($Xi==7); 756 eval(shift(@insns)); 757 eval(shift(@insns)); 758 759 foreach (@insns) { eval; } # remaining instructions [if any] 760 761 $Xi++; push(@X,shift(@X)); # "rotate" X[] 762} 763 764sub Xupdate_ssse3_32_79() 765{ use integer; 766 my $body = shift; 767 my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions 768 my ($a,$b,$c,$d,$e); 769 770 eval(shift(@insns)); # body_20_39 771 &pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" 772 &punpcklqdq(@X[2],@X[-1&7]); # compose "X[-6]", was &palignr(@X[2],@X[-2&7],8) 773 eval(shift(@insns)); 774 eval(shift(@insns)); 775 eval(shift(@insns)); # rol 776 777 &pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]" 778 &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer 779 eval(shift(@insns)); 780 eval(shift(@insns)); 781 eval(shift(@insns)) if (@insns[0] =~ /_rol/); 782 if ($Xi%5) { 783 &movdqa (@X[4],@X[3]); # "perpetuate" K_XX_XX... 784 } else { # ... or load next one 785 &movdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp")); 786 } 787 eval(shift(@insns)); # ror 788 &paddd (@X[3],@X[-1&7]); 789 eval(shift(@insns)); 790 791 &pxor (@X[0],@X[2]); # "X[0]"^="X[-6]" 792 eval(shift(@insns)); # body_20_39 793 eval(shift(@insns)); 794 eval(shift(@insns)); 795 eval(shift(@insns)); # rol 796 797 &movdqa (@X[2],@X[0]); 798 &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU 799 eval(shift(@insns)); 800 eval(shift(@insns)); 801 eval(shift(@insns)); # ror 802 eval(shift(@insns)); 803 eval(shift(@insns)) if (@insns[0] =~ /_rol/); 804 805 &pslld (@X[0],2); 806 eval(shift(@insns)); # body_20_39 807 eval(shift(@insns)); 808 &psrld (@X[2],30); 809 eval(shift(@insns)); 810 eval(shift(@insns)); # rol 811 eval(shift(@insns)); 812 eval(shift(@insns)); 813 eval(shift(@insns)); # ror 814 eval(shift(@insns)); 815 eval(shift(@insns)) if (@insns[1] =~ /_rol/); 816 eval(shift(@insns)) if (@insns[0] =~ /_rol/); 817 818 &por (@X[0],@X[2]); # "X[0]"<<<=2 819 eval(shift(@insns)); # body_20_39 820 eval(shift(@insns)); 821 &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19); # restore X[] from backtrace buffer 822 eval(shift(@insns)); 823 eval(shift(@insns)); # rol 824 eval(shift(@insns)); 825 eval(shift(@insns)); 826 eval(shift(@insns)); # ror 827 &pshufd (@X[3],@X[-1],0xee) if ($Xi<19); # was &movdqa (@X[3],@X[0]) 828 eval(shift(@insns)); 829 830 foreach (@insns) { eval; } # remaining instructions 831 832 $Xi++; push(@X,shift(@X)); # "rotate" X[] 833} 834 835sub Xuplast_ssse3_80() 836{ use integer; 837 my $body = shift; 838 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions 839 my ($a,$b,$c,$d,$e); 840 841 eval(shift(@insns)); 842 eval(shift(@insns)); 843 eval(shift(@insns)); 844 eval(shift(@insns)); 845 eval(shift(@insns)); 846 eval(shift(@insns)); 847 eval(shift(@insns)); 848 &paddd (@X[3],@X[-1&7]); 849 eval(shift(@insns)); 850 eval(shift(@insns)); 851 eval(shift(@insns)); 852 eval(shift(@insns)); 853 854 &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer IALU 855 856 foreach (@insns) { eval; } # remaining instructions 857 858 &mov ($inp=@T[1],&DWP(192+4,"esp")); 859 &cmp ($inp,&DWP(192+8,"esp")); 860 &je (&label("done")); 861 862 &movdqa (@X[3],&QWP(112+48,"esp")); # K_00_19 863 &movdqa (@X[2],&QWP(112+64,"esp")); # pbswap mask 864 &movdqu (@X[-4&7],&QWP(0,$inp)); # load input 865 &movdqu (@X[-3&7],&QWP(16,$inp)); 866 &movdqu (@X[-2&7],&QWP(32,$inp)); 867 &movdqu (@X[-1&7],&QWP(48,$inp)); 868 &add ($inp,64); 869 &pshufb (@X[-4&7],@X[2]); # byte swap 870 &mov (&DWP(192+4,"esp"),$inp); 871 &movdqa (&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot 872 873 $Xi=0; 874} 875 876sub Xloop_ssse3() 877{ use integer; 878 my $body = shift; 879 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions 880 my ($a,$b,$c,$d,$e); 881 882 eval(shift(@insns)); 883 eval(shift(@insns)); 884 eval(shift(@insns)); 885 eval(shift(@insns)); 886 eval(shift(@insns)); 887 eval(shift(@insns)); 888 eval(shift(@insns)); 889 &pshufb (@X[($Xi-3)&7],@X[2]); 890 eval(shift(@insns)); 891 eval(shift(@insns)); 892 eval(shift(@insns)); 893 eval(shift(@insns)); 894 &paddd (@X[($Xi-4)&7],@X[3]); 895 eval(shift(@insns)); 896 eval(shift(@insns)); 897 eval(shift(@insns)); 898 eval(shift(@insns)); 899 &movdqa (&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]); # X[]+K xfer to IALU 900 eval(shift(@insns)); 901 eval(shift(@insns)); 902 eval(shift(@insns)); 903 eval(shift(@insns)); 904 &psubd (@X[($Xi-4)&7],@X[3]); 905 906 foreach (@insns) { eval; } 907 $Xi++; 908} 909 910sub Xtail_ssse3() 911{ use integer; 912 my $body = shift; 913 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions 914 my ($a,$b,$c,$d,$e); 915 916 foreach (@insns) { eval; } 917} 918 919sub body_00_19 () { # ((c^d)&b)^d 920 # on start @T[0]=(c^d)&b 921 return &body_20_39() if ($rx==19); $rx++; 922 ( 923 '($a,$b,$c,$d,$e)=@V;'. 924 '&$_ror ($b,$j?7:2);', # $b>>>2 925 '&xor (@T[0],$d);', 926 '&mov (@T[1],$a);', # $b in next round 927 928 '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer 929 '&xor ($b,$c);', # $c^$d for next round 930 931 '&$_rol ($a,5);', 932 '&add ($e,@T[0]);', 933 '&and (@T[1],$b);', # ($b&($c^$d)) for next round 934 935 '&xor ($b,$c);', # restore $b 936 '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' 937 ); 938} 939 940sub body_20_39 () { # b^d^c 941 # on entry @T[0]=b^d 942 return &body_40_59() if ($rx==39); $rx++; 943 ( 944 '($a,$b,$c,$d,$e)=@V;'. 945 '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer 946 '&xor (@T[0],$d) if($j==19);'. 947 '&xor (@T[0],$c) if($j> 19);', # ($b^$d^$c) 948 '&mov (@T[1],$a);', # $b in next round 949 950 '&$_rol ($a,5);', 951 '&add ($e,@T[0]);', 952 '&xor (@T[1],$c) if ($j< 79);', # $b^$d for next round 953 954 '&$_ror ($b,7);', # $b>>>2 955 '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' 956 ); 957} 958 959sub body_40_59 () { # ((b^c)&(c^d))^c 960 # on entry @T[0]=(b^c), (c^=d) 961 $rx++; 962 ( 963 '($a,$b,$c,$d,$e)=@V;'. 964 '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer 965 '&and (@T[0],$c) if ($j>=40);', # (b^c)&(c^d) 966 '&xor ($c,$d) if ($j>=40);', # restore $c 967 968 '&$_ror ($b,7);', # $b>>>2 969 '&mov (@T[1],$a);', # $b for next round 970 '&xor (@T[0],$c);', 971 972 '&$_rol ($a,5);', 973 '&add ($e,@T[0]);', 974 '&xor (@T[1],$c) if ($j==59);'. 975 '&xor (@T[1],$b) if ($j< 59);', # b^c for next round 976 977 '&xor ($b,$c) if ($j< 59);', # c^d for next round 978 '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' 979 ); 980} 981###### 982sub bodyx_00_19 () { # ((c^d)&b)^d 983 # on start @T[0]=(b&c)^(~b&d), $e+=X[]+K 984 return &bodyx_20_39() if ($rx==19); $rx++; 985 ( 986 '($a,$b,$c,$d,$e)=@V;'. 987 988 '&rorx ($b,$b,2) if ($j==0);'. # $b>>>2 989 '&rorx ($b,@T[1],7) if ($j!=0);', # $b>>>2 990 '&lea ($e,&DWP(0,$e,@T[0]));', 991 '&rorx (@T[0],$a,5);', 992 993 '&andn (@T[1],$a,$c);', 994 '&and ($a,$b)', 995 '&add ($d,&DWP(4*(($j+1)&15),"esp"));', # X[]+K xfer 996 997 '&xor (@T[1],$a)', 998 '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' 999 ); 1000} 1001 1002sub bodyx_20_39 () { # b^d^c 1003 # on start $b=b^c^d 1004 return &bodyx_40_59() if ($rx==39); $rx++; 1005 ( 1006 '($a,$b,$c,$d,$e)=@V;'. 1007 1008 '&add ($e,($j==19?@T[0]:$b))', 1009 '&rorx ($b,@T[1],7);', # $b>>>2 1010 '&rorx (@T[0],$a,5);', 1011 1012 '&xor ($a,$b) if ($j<79);', 1013 '&add ($d,&DWP(4*(($j+1)&15),"esp")) if ($j<79);', # X[]+K xfer 1014 '&xor ($a,$c) if ($j<79);', 1015 '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' 1016 ); 1017} 1018 1019sub bodyx_40_59 () { # ((b^c)&(c^d))^c 1020 # on start $b=((b^c)&(c^d))^c 1021 return &bodyx_20_39() if ($rx==59); $rx++; 1022 ( 1023 '($a,$b,$c,$d,$e)=@V;'. 1024 1025 '&rorx (@T[0],$a,5)', 1026 '&lea ($e,&DWP(0,$e,$b))', 1027 '&rorx ($b,@T[1],7)', # $b>>>2 1028 '&add ($d,&DWP(4*(($j+1)&15),"esp"))', # X[]+K xfer 1029 1030 '&mov (@T[1],$c)', 1031 '&xor ($a,$b)', # b^c for next round 1032 '&xor (@T[1],$b)', # c^d for next round 1033 1034 '&and ($a,@T[1])', 1035 '&add ($e,@T[0])', 1036 '&xor ($a,$b)' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' 1037 ); 1038} 1039 1040&set_label("loop",16); 1041 &Xupdate_ssse3_16_31(\&body_00_19); 1042 &Xupdate_ssse3_16_31(\&body_00_19); 1043 &Xupdate_ssse3_16_31(\&body_00_19); 1044 &Xupdate_ssse3_16_31(\&body_00_19); 1045 &Xupdate_ssse3_32_79(\&body_00_19); 1046 &Xupdate_ssse3_32_79(\&body_20_39); 1047 &Xupdate_ssse3_32_79(\&body_20_39); 1048 &Xupdate_ssse3_32_79(\&body_20_39); 1049 &Xupdate_ssse3_32_79(\&body_20_39); 1050 &Xupdate_ssse3_32_79(\&body_20_39); 1051 &Xupdate_ssse3_32_79(\&body_40_59); 1052 &Xupdate_ssse3_32_79(\&body_40_59); 1053 &Xupdate_ssse3_32_79(\&body_40_59); 1054 &Xupdate_ssse3_32_79(\&body_40_59); 1055 &Xupdate_ssse3_32_79(\&body_40_59); 1056 &Xupdate_ssse3_32_79(\&body_20_39); 1057 &Xuplast_ssse3_80(\&body_20_39); # can jump to "done" 1058 1059 $saved_j=$j; @saved_V=@V; 1060 1061 &Xloop_ssse3(\&body_20_39); 1062 &Xloop_ssse3(\&body_20_39); 1063 &Xloop_ssse3(\&body_20_39); 1064 1065 &mov (@T[1],&DWP(192,"esp")); # update context 1066 &add ($A,&DWP(0,@T[1])); 1067 &add (@T[0],&DWP(4,@T[1])); # $b 1068 &add ($C,&DWP(8,@T[1])); 1069 &mov (&DWP(0,@T[1]),$A); 1070 &add ($D,&DWP(12,@T[1])); 1071 &mov (&DWP(4,@T[1]),@T[0]); 1072 &add ($E,&DWP(16,@T[1])); 1073 &mov (&DWP(8,@T[1]),$C); 1074 &mov ($B,$C); 1075 &mov (&DWP(12,@T[1]),$D); 1076 &xor ($B,$D); 1077 &mov (&DWP(16,@T[1]),$E); 1078 &mov (@T[1],@T[0]); 1079 &pshufd (@X[0],@X[-4&7],0xee); # was &movdqa (@X[0],@X[-3&7]); 1080 &and (@T[0],$B); 1081 &mov ($B,$T[1]); 1082 1083 &jmp (&label("loop")); 1084 1085&set_label("done",16); $j=$saved_j; @V=@saved_V; 1086 1087 &Xtail_ssse3(\&body_20_39); 1088 &Xtail_ssse3(\&body_20_39); 1089 &Xtail_ssse3(\&body_20_39); 1090 1091 &mov (@T[1],&DWP(192,"esp")); # update context 1092 &add ($A,&DWP(0,@T[1])); 1093 &mov ("esp",&DWP(192+12,"esp")); # restore %esp 1094 &add (@T[0],&DWP(4,@T[1])); # $b 1095 &add ($C,&DWP(8,@T[1])); 1096 &mov (&DWP(0,@T[1]),$A); 1097 &add ($D,&DWP(12,@T[1])); 1098 &mov (&DWP(4,@T[1]),@T[0]); 1099 &add ($E,&DWP(16,@T[1])); 1100 &mov (&DWP(8,@T[1]),$C); 1101 &mov (&DWP(12,@T[1]),$D); 1102 &mov (&DWP(16,@T[1]),$E); 1103 1104&function_end("_sha1_block_data_order_ssse3"); 1105 1106$rx=0; # reset 1107 1108if ($ymm) { 1109my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded 1110my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4 1111my @V=($A,$B,$C,$D,$E); 1112my $j=0; # hash round 1113my @T=($T,$tmp1); 1114my $inp; 1115 1116my $_rol=sub { &shld(@_[0],@_) }; 1117my $_ror=sub { &shrd(@_[0],@_) }; 1118 1119&function_begin("_sha1_block_data_order_avx"); 1120 &call (&label("pic_point")); # make it PIC! 1121 &set_label("pic_point"); 1122 &blindpop($tmp1); 1123 &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1)); 1124&set_label("avx_shortcut"); 1125 &vzeroall(); 1126 1127 &vmovdqa(@X[3],&QWP(0,$tmp1)); # K_00_19 1128 &vmovdqa(@X[4],&QWP(16,$tmp1)); # K_20_39 1129 &vmovdqa(@X[5],&QWP(32,$tmp1)); # K_40_59 1130 &vmovdqa(@X[6],&QWP(48,$tmp1)); # K_60_79 1131 &vmovdqa(@X[2],&QWP(64,$tmp1)); # pbswap mask 1132 1133 &mov ($E,&wparam(0)); # load argument block 1134 &mov ($inp=@T[1],&wparam(1)); 1135 &mov ($D,&wparam(2)); 1136 &mov (@T[0],"esp"); 1137 1138 # stack frame layout 1139 # 1140 # +0 X[0]+K X[1]+K X[2]+K X[3]+K # XMM->IALU xfer area 1141 # X[4]+K X[5]+K X[6]+K X[7]+K 1142 # X[8]+K X[9]+K X[10]+K X[11]+K 1143 # X[12]+K X[13]+K X[14]+K X[15]+K 1144 # 1145 # +64 X[0] X[1] X[2] X[3] # XMM->XMM backtrace area 1146 # X[4] X[5] X[6] X[7] 1147 # X[8] X[9] X[10] X[11] # even borrowed for K_00_19 1148 # 1149 # +112 K_20_39 K_20_39 K_20_39 K_20_39 # constants 1150 # K_40_59 K_40_59 K_40_59 K_40_59 1151 # K_60_79 K_60_79 K_60_79 K_60_79 1152 # K_00_19 K_00_19 K_00_19 K_00_19 1153 # pbswap mask 1154 # 1155 # +192 ctx # argument block 1156 # +196 inp 1157 # +200 end 1158 # +204 esp 1159 &sub ("esp",208); 1160 &and ("esp",-64); 1161 1162 &vmovdqa(&QWP(112+0,"esp"),@X[4]); # copy constants 1163 &vmovdqa(&QWP(112+16,"esp"),@X[5]); 1164 &vmovdqa(&QWP(112+32,"esp"),@X[6]); 1165 &shl ($D,6); # len*64 1166 &vmovdqa(&QWP(112+48,"esp"),@X[3]); 1167 &add ($D,$inp); # end of input 1168 &vmovdqa(&QWP(112+64,"esp"),@X[2]); 1169 &add ($inp,64); 1170 &mov (&DWP(192+0,"esp"),$E); # save argument block 1171 &mov (&DWP(192+4,"esp"),$inp); 1172 &mov (&DWP(192+8,"esp"),$D); 1173 &mov (&DWP(192+12,"esp"),@T[0]); # save original %esp 1174 1175 &mov ($A,&DWP(0,$E)); # load context 1176 &mov ($B,&DWP(4,$E)); 1177 &mov ($C,&DWP(8,$E)); 1178 &mov ($D,&DWP(12,$E)); 1179 &mov ($E,&DWP(16,$E)); 1180 &mov (@T[0],$B); # magic seed 1181 1182 &vmovdqu(@X[-4&7],&QWP(-64,$inp)); # load input to %xmm[0-3] 1183 &vmovdqu(@X[-3&7],&QWP(-48,$inp)); 1184 &vmovdqu(@X[-2&7],&QWP(-32,$inp)); 1185 &vmovdqu(@X[-1&7],&QWP(-16,$inp)); 1186 &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap 1187 &vpshufb(@X[-3&7],@X[-3&7],@X[2]); 1188 &vpshufb(@X[-2&7],@X[-2&7],@X[2]); 1189 &vmovdqa(&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot 1190 &vpshufb(@X[-1&7],@X[-1&7],@X[2]); 1191 &vpaddd (@X[0],@X[-4&7],@X[3]); # add K_00_19 1192 &vpaddd (@X[1],@X[-3&7],@X[3]); 1193 &vpaddd (@X[2],@X[-2&7],@X[3]); 1194 &vmovdqa(&QWP(0,"esp"),@X[0]); # X[]+K xfer to IALU 1195 &mov (@T[1],$C); 1196 &vmovdqa(&QWP(0+16,"esp"),@X[1]); 1197 &xor (@T[1],$D); 1198 &vmovdqa(&QWP(0+32,"esp"),@X[2]); 1199 &and (@T[0],@T[1]); 1200 &jmp (&label("loop")); 1201 1202sub Xupdate_avx_16_31() # recall that $Xi starts with 4 1203{ use integer; 1204 my $body = shift; 1205 my @insns = (&$body,&$body,&$body,&$body); # 40 instructions 1206 my ($a,$b,$c,$d,$e); 1207 1208 eval(shift(@insns)); 1209 eval(shift(@insns)); 1210 &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]" 1211 eval(shift(@insns)); 1212 eval(shift(@insns)); 1213 1214 &vpaddd (@X[3],@X[3],@X[-1&7]); 1215 &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer 1216 eval(shift(@insns)); 1217 eval(shift(@insns)); 1218 &vpsrldq(@X[2],@X[-1&7],4); # "X[-3]", 3 dwords 1219 eval(shift(@insns)); 1220 eval(shift(@insns)); 1221 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]" 1222 eval(shift(@insns)); 1223 eval(shift(@insns)); 1224 1225 &vpxor (@X[2],@X[2],@X[-2&7]); # "X[-3]"^"X[-8]" 1226 eval(shift(@insns)); 1227 eval(shift(@insns)); 1228 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU 1229 eval(shift(@insns)); 1230 eval(shift(@insns)); 1231 1232 &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]" 1233 eval(shift(@insns)); 1234 eval(shift(@insns)); 1235 eval(shift(@insns)); 1236 eval(shift(@insns)); 1237 1238 &vpsrld (@X[2],@X[0],31); 1239 eval(shift(@insns)); 1240 eval(shift(@insns)); 1241 eval(shift(@insns)); 1242 eval(shift(@insns)); 1243 1244 &vpslldq(@X[4],@X[0],12); # "X[0]"<<96, extract one dword 1245 &vpaddd (@X[0],@X[0],@X[0]); 1246 eval(shift(@insns)); 1247 eval(shift(@insns)); 1248 eval(shift(@insns)); 1249 eval(shift(@insns)); 1250 1251 &vpsrld (@X[3],@X[4],30); 1252 &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=1 1253 eval(shift(@insns)); 1254 eval(shift(@insns)); 1255 eval(shift(@insns)); 1256 eval(shift(@insns)); 1257 1258 &vpslld (@X[4],@X[4],2); 1259 &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer 1260 eval(shift(@insns)); 1261 eval(shift(@insns)); 1262 &vpxor (@X[0],@X[0],@X[3]); 1263 eval(shift(@insns)); 1264 eval(shift(@insns)); 1265 eval(shift(@insns)); 1266 eval(shift(@insns)); 1267 1268 &vpxor (@X[0],@X[0],@X[4]); # "X[0]"^=("X[0]"<<96)<<<2 1269 eval(shift(@insns)); 1270 eval(shift(@insns)); 1271 &vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX 1272 eval(shift(@insns)); 1273 eval(shift(@insns)); 1274 1275 foreach (@insns) { eval; } # remaining instructions [if any] 1276 1277 $Xi++; push(@X,shift(@X)); # "rotate" X[] 1278} 1279 1280sub Xupdate_avx_32_79() 1281{ use integer; 1282 my $body = shift; 1283 my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions 1284 my ($a,$b,$c,$d,$e); 1285 1286 &vpalignr(@X[2],@X[-1&7],@X[-2&7],8); # compose "X[-6]" 1287 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" 1288 eval(shift(@insns)); # body_20_39 1289 eval(shift(@insns)); 1290 eval(shift(@insns)); 1291 eval(shift(@insns)); # rol 1292 1293 &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]" 1294 &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer 1295 eval(shift(@insns)); 1296 eval(shift(@insns)); 1297 if ($Xi%5) { 1298 &vmovdqa (@X[4],@X[3]); # "perpetuate" K_XX_XX... 1299 } else { # ... or load next one 1300 &vmovdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp")); 1301 } 1302 &vpaddd (@X[3],@X[3],@X[-1&7]); 1303 eval(shift(@insns)); # ror 1304 eval(shift(@insns)); 1305 1306 &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-6]" 1307 eval(shift(@insns)); # body_20_39 1308 eval(shift(@insns)); 1309 eval(shift(@insns)); 1310 eval(shift(@insns)); # rol 1311 1312 &vpsrld (@X[2],@X[0],30); 1313 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU 1314 eval(shift(@insns)); 1315 eval(shift(@insns)); 1316 eval(shift(@insns)); # ror 1317 eval(shift(@insns)); 1318 1319 &vpslld (@X[0],@X[0],2); 1320 eval(shift(@insns)); # body_20_39 1321 eval(shift(@insns)); 1322 eval(shift(@insns)); 1323 eval(shift(@insns)); # rol 1324 eval(shift(@insns)); 1325 eval(shift(@insns)); 1326 eval(shift(@insns)); # ror 1327 eval(shift(@insns)); 1328 1329 &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=2 1330 eval(shift(@insns)); # body_20_39 1331 eval(shift(@insns)); 1332 &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19); # restore X[] from backtrace buffer 1333 eval(shift(@insns)); 1334 eval(shift(@insns)); # rol 1335 eval(shift(@insns)); 1336 eval(shift(@insns)); 1337 eval(shift(@insns)); # ror 1338 eval(shift(@insns)); 1339 1340 foreach (@insns) { eval; } # remaining instructions 1341 1342 $Xi++; push(@X,shift(@X)); # "rotate" X[] 1343} 1344 1345sub Xuplast_avx_80() 1346{ use integer; 1347 my $body = shift; 1348 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions 1349 my ($a,$b,$c,$d,$e); 1350 1351 eval(shift(@insns)); 1352 &vpaddd (@X[3],@X[3],@X[-1&7]); 1353 eval(shift(@insns)); 1354 eval(shift(@insns)); 1355 eval(shift(@insns)); 1356 eval(shift(@insns)); 1357 1358 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer IALU 1359 1360 foreach (@insns) { eval; } # remaining instructions 1361 1362 &mov ($inp=@T[1],&DWP(192+4,"esp")); 1363 &cmp ($inp,&DWP(192+8,"esp")); 1364 &je (&label("done")); 1365 1366 &vmovdqa(@X[3],&QWP(112+48,"esp")); # K_00_19 1367 &vmovdqa(@X[2],&QWP(112+64,"esp")); # pbswap mask 1368 &vmovdqu(@X[-4&7],&QWP(0,$inp)); # load input 1369 &vmovdqu(@X[-3&7],&QWP(16,$inp)); 1370 &vmovdqu(@X[-2&7],&QWP(32,$inp)); 1371 &vmovdqu(@X[-1&7],&QWP(48,$inp)); 1372 &add ($inp,64); 1373 &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap 1374 &mov (&DWP(192+4,"esp"),$inp); 1375 &vmovdqa(&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot 1376 1377 $Xi=0; 1378} 1379 1380sub Xloop_avx() 1381{ use integer; 1382 my $body = shift; 1383 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions 1384 my ($a,$b,$c,$d,$e); 1385 1386 eval(shift(@insns)); 1387 eval(shift(@insns)); 1388 &vpshufb (@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]); 1389 eval(shift(@insns)); 1390 eval(shift(@insns)); 1391 &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],@X[3]); 1392 eval(shift(@insns)); 1393 eval(shift(@insns)); 1394 eval(shift(@insns)); 1395 eval(shift(@insns)); 1396 &vmovdqa (&QWP(0+16*$Xi,"esp"),@X[$Xi&7]); # X[]+K xfer to IALU 1397 eval(shift(@insns)); 1398 eval(shift(@insns)); 1399 1400 foreach (@insns) { eval; } 1401 $Xi++; 1402} 1403 1404sub Xtail_avx() 1405{ use integer; 1406 my $body = shift; 1407 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions 1408 my ($a,$b,$c,$d,$e); 1409 1410 foreach (@insns) { eval; } 1411} 1412 1413&set_label("loop",16); 1414 &Xupdate_avx_16_31(\&body_00_19); 1415 &Xupdate_avx_16_31(\&body_00_19); 1416 &Xupdate_avx_16_31(\&body_00_19); 1417 &Xupdate_avx_16_31(\&body_00_19); 1418 &Xupdate_avx_32_79(\&body_00_19); 1419 &Xupdate_avx_32_79(\&body_20_39); 1420 &Xupdate_avx_32_79(\&body_20_39); 1421 &Xupdate_avx_32_79(\&body_20_39); 1422 &Xupdate_avx_32_79(\&body_20_39); 1423 &Xupdate_avx_32_79(\&body_20_39); 1424 &Xupdate_avx_32_79(\&body_40_59); 1425 &Xupdate_avx_32_79(\&body_40_59); 1426 &Xupdate_avx_32_79(\&body_40_59); 1427 &Xupdate_avx_32_79(\&body_40_59); 1428 &Xupdate_avx_32_79(\&body_40_59); 1429 &Xupdate_avx_32_79(\&body_20_39); 1430 &Xuplast_avx_80(\&body_20_39); # can jump to "done" 1431 1432 $saved_j=$j; @saved_V=@V; 1433 1434 &Xloop_avx(\&body_20_39); 1435 &Xloop_avx(\&body_20_39); 1436 &Xloop_avx(\&body_20_39); 1437 1438 &mov (@T[1],&DWP(192,"esp")); # update context 1439 &add ($A,&DWP(0,@T[1])); 1440 &add (@T[0],&DWP(4,@T[1])); # $b 1441 &add ($C,&DWP(8,@T[1])); 1442 &mov (&DWP(0,@T[1]),$A); 1443 &add ($D,&DWP(12,@T[1])); 1444 &mov (&DWP(4,@T[1]),@T[0]); 1445 &add ($E,&DWP(16,@T[1])); 1446 &mov ($B,$C); 1447 &mov (&DWP(8,@T[1]),$C); 1448 &xor ($B,$D); 1449 &mov (&DWP(12,@T[1]),$D); 1450 &mov (&DWP(16,@T[1]),$E); 1451 &mov (@T[1],@T[0]); 1452 &and (@T[0],$B); 1453 &mov ($B,@T[1]); 1454 1455 &jmp (&label("loop")); 1456 1457&set_label("done",16); $j=$saved_j; @V=@saved_V; 1458 1459 &Xtail_avx(\&body_20_39); 1460 &Xtail_avx(\&body_20_39); 1461 &Xtail_avx(\&body_20_39); 1462 1463 &vzeroall(); 1464 1465 &mov (@T[1],&DWP(192,"esp")); # update context 1466 &add ($A,&DWP(0,@T[1])); 1467 &mov ("esp",&DWP(192+12,"esp")); # restore %esp 1468 &add (@T[0],&DWP(4,@T[1])); # $b 1469 &add ($C,&DWP(8,@T[1])); 1470 &mov (&DWP(0,@T[1]),$A); 1471 &add ($D,&DWP(12,@T[1])); 1472 &mov (&DWP(4,@T[1]),@T[0]); 1473 &add ($E,&DWP(16,@T[1])); 1474 &mov (&DWP(8,@T[1]),$C); 1475 &mov (&DWP(12,@T[1]),$D); 1476 &mov (&DWP(16,@T[1]),$E); 1477&function_end("_sha1_block_data_order_avx"); 1478} 1479&set_label("K_XX_XX",64); 1480&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999); # K_00_19 1481&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1); # K_20_39 1482&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc); # K_40_59 1483&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6); # K_60_79 1484&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # pbswap mask 1485&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0); 1486} 1487&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>"); 1488 1489&asm_finish(); 1490 1491close STDOUT or die "error closing STDOUT: $!"; 1492