#! /usr/bin/env perl
# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
# other assembly modules. Just like aesv8-armx.pl this module
# supports both AArch32 and AArch64 execution modes.
#
# July 2014
#
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information].
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Apple A7	0.92		5.62
# Cortex-A53	1.01		8.39
# Cortex-A57	1.17		7.61
# Denver	0.71		6.02
# Mongoose	1.10		8.06
#
# (*)	presented for reference/comparison purposes;

$flavour = shift;
$output  = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

$Xi="x0";	# argument block
$Htbl="x1";
$inp="x2";
$len="x3";

$inc="x12";

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));

$code=<<___;
#include <openssl/arm_arch.h>

.text
___
$code.=<<___ if ($flavour =~ /64/);
#if !defined(__clang__) || defined(BORINGSSL_CLANG_SUPPORTS_DOT_ARCH)
.arch	armv8-a+crypto
#endif
___
$code.=<<___ if ($flavour !~ /64/);
.fpu	neon
.code	32
#undef	__thumb2__
___

################################################################################
# void gcm_init_v8(u128 Htable[16],const u64 H[2]);
#
# input:	128-bit H - secret parameter E(K,0^128)
# output:	precomputed table filled with degrees of twisted H;
#		H is twisted to handle the reversed bit order of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		data is opaque to the outside world (which allows the
#		code to be optimized independently);
#
$code.=<<___;
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64		{$t1},[x1]		@ load input H
	vmov.i8		$xC2,#0xe1
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$IN,$t1,$t1,#8
	vshr.u64	$t2,$xC2,#63
	vdup.32		$t1,${t1}[1]
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.u64	$t2,$IN,#63
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vand		$t2,$t2,$t0
	vshl.i64	$IN,$IN,#1
	vext.8		$t2,$t2,$t2,#8
	vand		$t0,$t0,$t1
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
	vst1.64		{$H},[x0],#16		@ store Htable[0]

	@ calculate H^2
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull.p64	$Xl,$H,$H
	veor		$t0,$t0,$H
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0

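	@ note: Xl/Xh hold the low/high 128-bit partial products of H*H and
	@ Xm the (Hl+Hh)*(Hl+Hh) term; the rotate/xor sequence below is the
	@ usual Karatsuba recombination into a 256-bit product, which the
	@ two-phase multiplication by the 0xc2 constant then reduces back
	@ to 128 bits, yielding twisted H^2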
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$H2,$Xl,$t2

	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	veor		$t1,$t1,$H2
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0]		@ store Htable[1..2]

	ret
.size	gcm_init_v8,.-gcm_init_v8
___
################################################################################
# void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
#
# input:	Xi - current hash value;
#		Htable - table precomputed in gcm_init_v8;
# output:	Xi - next hash value Xi;
#
$code.=<<___;
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64		{$t1},[$Xi]		@ load Xi
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$IN,$t1,$t1,#8

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

	ret
.size	gcm_gmult_v8,.-gcm_gmult_v8
___
################################################################################
# void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
#
# input:	table precomputed in gcm_init_v8;
#		current hash value Xi;
#		pointer to input data;
#		length of input data in bytes, which must be divisible
#		by the block size;
# output:	next hash value Xi;
#
$code.=<<___;
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
___
$code.=<<___		if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ the loaded value would have
						@ to be rotated in order to
						@ make it appear as in the
						@ algorithm specification
	subs		$len,$len,#32		@ see if $len is 32 or larger
	mov		$inc,#16		@ $inc is used as post-
						@ increment for the input
						@ pointer; as the loop is
						@ modulo-scheduled $inc is
						@ zeroed just in time to
						@ preclude overstepping
						@ inp[len], which means that
						@ the last block[s] are
						@ actually loaded twice, but
						@ the last copy is not
						@ processed
	vld1.64		{$H-$Hhl},[$Htbl],#32	@ load twisted H, ..., H^2
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H2},[$Htbl]
	cclr		$inc,eq			@ is it time to zero $inc?
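	@ note: "cclr" is a pseudo-instruction handled by the Perl
	@ post-processing at the bottom of this file; it becomes a
	@ conditional select against the zero register on AArch64 and a
	@ conditional "mov ...,#0" on AArch32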
	vext.8		$Xl,$Xl,$Xl,#8		@ rotate Xi
	vld1.64		{$t0},[$inp],#16	@ load [rotated] I[0]
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	$t0,$t0
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$IN,$t0,$t0,#8		@ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
___
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
	#######
	# Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
	#	 [(H*Ii+1) + (H*Xi+1)] mod P =
	#	 [(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
$code.=<<___;
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$In,$t1,$t1,#8
	veor		$IN,$IN,$Xl		@ I[i]^=Xi
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	b		.Loop_mod2x_v8

.align	4
.Loop_mod2x_v8:
	vext.8		$t2,$IN,$IN,#8
	subs		$len,$len,#32		@ is there more data?
	vpmull.p64	$Xl,$H2,$IN		@ H^2.lo·Xi.lo
	cclr		$inc,lo			@ is it time to zero $inc?

	vpmull.p64	$Xmn,$Hhl,$t1
	veor		$t2,$t2,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H2,$IN		@ H^2.hi·Xi.hi
	veor		$Xl,$Xl,$Xln		@ accumulate
	vpmull2.p64	$Xm,$Hhl,$t2		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64		{$t0},[$inp],$inc	@ load [rotated] I[i+2]

	veor		$Xh,$Xh,$Xhn
	cclr		$inc,eq			@ is it time to zero $inc?
	veor		$Xm,$Xm,$Xmn

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	$t0,$t0
#endif
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$In,$t1,$t1,#8
	vext.8		$IN,$t0,$t0,#8
	veor		$Xl,$Xm,$t2
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$IN,$IN,$Xh		@ accumulate $IN early

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$IN,$IN,$t2
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	veor		$IN,$IN,$Xl
	vpmull2.p64	$Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor		$Xh,$Xh,$t2
	vext.8		$IN,$t0,$t0,#8		@ re-construct $IN
	adds		$len,$len,#32		@ re-construct $len
	veor		$Xl,$Xl,$Xh		@ re-construct $Xl
	b.eq		.Ldone_v8		@ is $len zero?
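	@ note: otherwise exactly one 16-byte block is left; fall through
	@ to .Lodd_tail_v8 below and process it with a single
	@ (non-aggregated) multiplication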
___
}
$code.=<<___;
.Lodd_tail_v8:
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t0,$t2		@ $t1 is rotated inp^Xi

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

___
$code.=<<___		if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	ret
.size	gcm_ghash_v8,.-gcm_ghash_v8
___
}
$code.=<<___;
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

if ($flavour =~ /64/) {				######## 64-bit code
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o	&&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vext\.8/ext/o		or
	s/vshr\.s/sshr\.s/o	or
	s/vshr/ushr/o		or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;				# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[ui]?8(\s)/$1/o;
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;		# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;		# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
} else {					######## 32-bit code
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
	    # emit the encoding as individual bytes, in little-endian order,
	    # since ARMv7 instructions are always encoded little-endian;
	    # the correct solution would be to use the .inst directive, but
	    # older assemblers don't implement it:-(
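	    # (reading aid: 0xf2a00e00 is the base encoding of vmull.p64;
	    #  the packing above places the even d-register numbers 2*q in
	    #  the Vd/Vn/Vm fields with their top bit in D/N/M, and oring
	    #  in 0x00010001 selects the odd, i.e. high, source halves for
	    #  the "pmull2" form)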
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }

    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\],#[0-9]+/]!/o;

	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo				or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo		or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)b\./$1b/o						or
	s/^(\s+)ret/$1bx\tlr/o;

	print $_,"\n";
    }
}

close STDOUT; # enforce flush