#! /usr/bin/env perl
# Author: Marc Bevand <bevand_m (at) epita.fr>
# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# MD5 optimized for AMD64.
#
# This is a perlasm generator: it accumulates x86-64 assembly text into
# $code and pipes it through x86_64-xlate.pl, which translates it to the
# requested flavour (ELF, macho, MASM/NASM, mingw64).  Everything inside
# the <<EOF heredocs below is emitted assembly, not Perl — the /* */ and
# '#' annotations in those strings end up verbatim in the output file and
# must not be edited as if they were Perl comments.

use strict;

# Accumulator for the generated assembly source.
my $code;

# round1_step() does:
#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
#
# $pos is -1 for the first step of the round (emits extra setup loads),
# 0 for middle steps, and 1 for the last step; $k_next is the message
# word index prefetched for the following step.
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	xor	$y, %r11d		/* y ^ ... */
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	and	$x, %r11d		/* x & ... */
	xor	$z, %r11d		/* z ^ ... */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	add	%r11d, $dst		/* dst += ... */
	rol	\$$s, $dst		/* dst <<< s */
	mov	$y, %r11d		/* (NEXT STEP) z' = $y */
	add	$x, $dst		/* dst += x */
EOF
}

# round2_step() does:
#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
#   %r12d = z' (copy of z for the next step)
# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
#
# G is computed as (y & ~z) | (x & z), which needs two scratch copies of
# z (%r11d negated, %r12d as-is) — hence the extra register vs round 1.
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
    $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= " mov %edx, %r12d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	not	%r11d			/* not z */
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	and	$x, %r12d		/* x & z */
	and	$y, %r11d		/* y & (not z) */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	or	%r11d, %r12d		/* (y & (not z)) | (x & z) */
	mov	$y, %r11d		/* (NEXT STEP) z' = $y */
	add	%r12d, $dst		/* dst += ... */
	mov	$y, %r12d		/* (NEXT STEP) z' = $y */
	rol	\$$s, $dst		/* dst <<< s */
	add	$x, $dst		/* dst += x */
EOF
}

# round3_step() does:
#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
#
# H = x ^ y ^ z; the running xor is kept in %r11d across steps, which is
# why this round carries y (not z) forward.
sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
    $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	xor	$z, %r11d		/* z ^ ... */
	xor	$x, %r11d		/* x ^ ... */
	add	%r11d, $dst		/* dst += ... */
	rol	\$$s, $dst		/* dst <<< s */
	mov	$x, %r11d		/* (NEXT STEP) y' = $x */
	add	$x, $dst		/* dst += x */
EOF
}

# round4_step() does:
#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = not z' (copy of not z for the next step)
# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
#
# I = y ^ (x | ~z); ~z is precomputed into %r11d (via xor with the
# 0xffffffff mask) so each step starts from the negated copy.
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= " mov \$0xffffffff, %r11d\n" if ($pos == -1);
    $code .= " xor %edx, %r11d /* (NEXT STEP) not z' = not %edx*/\n"
        if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst	/* Const + dst + ... */
	or	$x, %r11d		/* x | ... */
	xor	$y, %r11d		/* y ^ ... */
	add	%r11d, $dst		/* dst += ... */
	mov	$k_next*4(%rsi),%r10d	/* (NEXT STEP) X[$k_next] */
	mov	\$0xffffffff, %r11d
	rol	\$$s, $dst		/* dst <<< s */
	xor	$y, %r11d		/* (NEXT STEP) not z' = not $y */
	add	$x, $dst		/* dst += x */
EOF
}

# Command-line handling: the script is invoked either as
#   md5-x86_64.pl <flavour> <output-file>
# or with a single output-file argument (in which case flavour is unset).
no warnings qw(uninitialized);
my $flavour = shift;
my $output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

# Windows targets need the SEH unwind handler emitted at the bottom.
my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

# Locate the perlasm translator next to this script (or two levels up in
# the perlasm/ directory) and pipe our output through it.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

$code .= <<EOF;
.text
.align 16

.globl md5_block_asm_data_order
.type md5_block_asm_data_order,\@function,3
md5_block_asm_data_order:
.cfi_startproc
	_CET_ENDBR
	push	%rbp
.cfi_push	rbp
	push	%rbx
.cfi_push	rbx
	push	%r12
.cfi_push	r12
	push	%r14
.cfi_push	r14
	push	%r15
.cfi_push	r15
.Lprologue:

	# rdi = arg #1 (ctx, MD5_CTX pointer)
	# rsi = arg #2 (ptr, data pointer)
	# rdx = arg #3 (nbr, number of 16-word blocks to process)
	mov	%rdi,		%rbp	# rbp = ctx
	shl	\$6,		%rdx	# rdx = nbr in bytes
	lea	(%rsi,%rdx),	%rdi	# rdi = end
	mov	0*4(%rbp),	%eax	# eax = ctx->A
	mov	1*4(%rbp),	%ebx	# ebx = ctx->B
	mov	2*4(%rbp),	%ecx	# ecx = ctx->C
	mov	3*4(%rbp),	%edx	# edx = ctx->D
	# end is 'rdi'
	# ptr is 'rsi'
	# A is 'eax'
	# B is 'ebx'
	# C is 'ecx'
	# D is 'edx'

	cmp	%rdi,		%rsi	# cmp end with ptr
	je	.Lend			# jmp if ptr == end

	# BEGIN of loop over 16-word blocks
.Lloop:	# save old values of A, B, C, D
	mov	%eax,		%r8d
	mov	%ebx,		%r9d
	mov	%ecx,		%r14d
	mov	%edx,		%r15d
EOF

# The 64 MD5 steps.  Arguments per call: ($pos, dst, x, y, z, k_next,
# T_i, s) — the T_i constants are the standard MD5 sine-derived table
# (RFC 1321) and s the per-round rotate amounts; registers rotate
# through eax/edx/ecx/ebx in the usual A,D,C,B order.
round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');

round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');

round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');

round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');

# Epilogue: fold the round results back into the chaining values, advance
# the data pointer, and restore the five callee-saved registers pushed in
# the prologue (5 pushes = 40 bytes, matching the add \$40 below).
$code .= <<EOF;
	# add old values of A, B, C, D
	add	%r8d,	%eax
	add	%r9d,	%ebx
	add	%r14d,	%ecx
	add	%r15d,	%edx

	# loop control
	add	\$64,		%rsi	# ptr += 64
	cmp	%rdi,		%rsi	# cmp end with ptr
	jb	.Lloop			# jmp if ptr < end
	# END of loop over 16-word blocks

.Lend:
	mov	%eax,		0*4(%rbp)	# ctx->A = A
	mov	%ebx,		1*4(%rbp)	# ctx->B = B
	mov	%ecx,		2*4(%rbp)	# ctx->C = C
	mov	%edx,		3*4(%rbp)	# ctx->D = D

	mov	(%rsp),%r15
.cfi_restore	r15
	mov	8(%rsp),%r14
.cfi_restore	r14
	mov	16(%rsp),%r12
.cfi_restore	r12
	mov	24(%rsp),%rbx
.cfi_restore	rbx
	mov	32(%rsp),%rbp
.cfi_restore	rbp
	add	\$40,%rsp
.cfi_adjust_cfa_offset	-40
.Lepilogue:
	ret
.cfi_endproc
.size md5_block_asm_data_order,.-md5_block_asm_data_order
EOF

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
#
# Win64 structured exception handling: when unwinding through the
# function body, recover the five saved registers from the stack and
# hand the rest to RtlVirtualUnwind.  Emitted only for Windows flavours.
if ($win64) {
my $rec="%rcx";
my $frame="%rdx";
my $context="%r8";
my $disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lin_prologue

	lea	40(%rax),%rax

	mov	-8(%rax),%rbp
	mov	-16(%rax),%rbx
	mov	-24(%rax),%r12
	mov	-32(%rax),%r14
	mov	-40(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_md5_block_asm_data_order
	.rva	.LSEH_end_md5_block_asm_data_order
	.rva	.LSEH_info_md5_block_asm_data_order

.section	.xdata
.align	8
.LSEH_info_md5_block_asm_data_order:
	.byte	9,0,0,0
	.rva	se_handler
___
}

# Emit everything through the xlate pipe (STDOUT was redirected above).
print $code;

close STDOUT or die "error closing STDOUT: $!";