/external/chromium_org/third_party/boringssl/src/crypto/modes/asm/ |
D | ghash-x86_64.pl |
    122  $Xi="%rdi";
    232  movzb 15($Xi),$Zlo
    235  &loop ($Xi);
    237  mov $Zlo,8($Xi)
    238  mov $Zhi,($Xi)
    300  &mov ($Zlo,"8($Xi)");
    301  &mov ($Zhi,"0($Xi)");
    311  &mov ("($Xi)",$Zhi);
    312  &mov ("8($Xi)","%rdx");
    347  &mov ($dat,"$j($Xi)") if (--$j%4==0);
    [all …]
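Every file in this directory is a perlasm generator for the same primitive: $Xi points at the 16-byte GHASH accumulator, and each block updates it as Xi ← (Xi ⊕ block) · H in GF(2^128). For orientation, a minimal bit-serial reference multiply in C, following the NIST SP 800-38D convention (bit 0 is the most significant bit; type and function names are mine, not from these files):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } be128;  /* hi = bytes 0-7, lo = bytes 8-15 */

    /* Bit-serial GF(2^128) multiply, Z = X*Y, with GCM's reflected bit
     * order: multiplying by x is a right shift that folds 0xE1 into the top. */
    static be128 gf128_mul(be128 X, be128 Y) {
        be128 Z = {0, 0}, V = Y;
        for (int i = 0; i < 128; i++) {
            /* bit i of X, counting from the MSB of byte 0 */
            uint64_t bit = (i < 64) ? (X.hi >> (63 - i)) & 1
                                    : (X.lo >> (127 - i)) & 1;
            if (bit) { Z.hi ^= V.hi; Z.lo ^= V.lo; }
            uint64_t lsb = V.lo & 1;          /* V = V * x */
            V.lo = (V.lo >> 1) | (V.hi << 63);
            V.hi >>= 1;
            if (lsb) V.hi ^= 0xE100000000000000ULL;
        }
        return Z;
    }

The table-driven and carry-less-multiply files below are all accelerations of exactly this operation.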
|
D | ghash-x86.pl |
    830  ($Xi,$Xhi)=("xmm0","xmm1"); $Hkey="xmm2";
    837  my ($Xhi,$Xi,$Hkey,$HK)=@_;
    839  &movdqa ($Xhi,$Xi); #
    840  &pshufd ($T1,$Xi,0b01001110);
    842  &pxor ($T1,$Xi); #
    846  &pclmulqdq ($Xi,$Hkey,0x00); #######
    849  &xorps ($T1,$Xi); #
    856  &pxor ($Xi,$T2); #
    865  my ($Xhi,$Xi,$Hkey)=@_;
    867  &movdqa ($T1,$Xi); #
    [all …]
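The pclmulqdq sequence visible here (pshufd 0b01001110 to swap 64-bit halves, three carry-less multiplies, xors to recover the middle term) is Karatsuba over the two 64-bit halves. A sketch with SSE intrinsics, assuming the extra $HK argument carries H.lo^H.hi in its low quadword; names are mine and the final polynomial reduction is omitted:

    #include <emmintrin.h>
    #include <wmmintrin.h>   /* _mm_clmulepi64_si128 (PCLMULQDQ) */

    /* 128x128 carry-less multiply with three PCLMULQDQs (Karatsuba). */
    static void clmul_karatsuba(__m128i X, __m128i H, __m128i HK,
                                __m128i *lo, __m128i *hi) {
        __m128i T1 = _mm_xor_si128(_mm_shuffle_epi32(X, 0x4E), X); /* X.lo^X.hi */
        __m128i L  = _mm_clmulepi64_si128(X,  H,  0x00);  /* X.lo * H.lo */
        __m128i Hh = _mm_clmulepi64_si128(X,  H,  0x11);  /* X.hi * H.hi */
        __m128i M  = _mm_clmulepi64_si128(T1, HK, 0x00);  /* (X.lo^X.hi)*(H.lo^H.hi) */
        M   = _mm_xor_si128(M, _mm_xor_si128(L, Hh));     /* middle term */
        *lo = _mm_xor_si128(L,  _mm_slli_si128(M, 8));
        *hi = _mm_xor_si128(Hh, _mm_srli_si128(M, 8));
    }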
|
D | aesni-gcm-x86_64.pl |
    68  $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));
    125  vpxor $Z0,$Xi,$Xi # modulo-scheduled
    136  vpxor 16+8(%rsp),$Xi,$Xi # modulo-scheduled [vpxor $Z3,$Xi,$Xi]
    200  vpxor 0x70+8(%rsp),$Xi,$Xi # accumulate I[0]
    214  vpclmulqdq \$0x10,$Hkey,$Xi,$Z1
    217  vpclmulqdq \$0x01,$Hkey,$Xi,$T1
    221  vpclmulqdq \$0x00,$Hkey,$Xi,$T2
    224  vpclmulqdq \$0x11,$Hkey,$Xi,$Xi
    239  vpxor $Xi,$Z3,$Z3
    329  vmovdqu $Z3,16+8(%rsp) # postpone vpxor $Z3,$Xi,$Xi
    [all …]
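The four vpclmulqdq immediates 0x00/0x01/0x10/0x11 select the low or high quadword of each source, so this path computes the full four-multiply schoolbook product rather than Karatsuba, which suits the modulo-scheduled AES/GHASH loop. A hedged intrinsics sketch of just that combine (names are mine; the accumulation across blocks and the reduction are omitted):

    #include <emmintrin.h>
    #include <wmmintrin.h>

    /* schoolbook 128x128 carry-less multiply: four PCLMULQDQs */
    static void clmul_schoolbook(__m128i X, __m128i H,
                                 __m128i *lo, __m128i *hi) {
        __m128i ll = _mm_clmulepi64_si128(X, H, 0x00); /* X.lo * H.lo */
        __m128i lh = _mm_clmulepi64_si128(X, H, 0x10); /* X.lo * H.hi */
        __m128i hl = _mm_clmulepi64_si128(X, H, 0x01); /* X.hi * H.lo */
        __m128i hh = _mm_clmulepi64_si128(X, H, 0x11); /* X.hi * H.hi */
        __m128i mid = _mm_xor_si128(lh, hl);
        *lo = _mm_xor_si128(ll, _mm_slli_si128(mid, 8));
        *hi = _mm_xor_si128(hh, _mm_srli_si128(mid, 8));
    }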
|
D | ghash-armv4.pl |
    77  $Xi="r0"; # argument block
    105  str $_,[$Xi,#$i]
    107  str $_,[$Xi,#$i]
    110  strb $_,[$Xi,#$i+3]
    112  strb $Tlh,[$Xi,#$i+2]
    114  strb $Thl,[$Xi,#$i+1]
    115  strb $Thh,[$Xi,#$i]
    162  ldrb $nhi,[$Xi,#15]
    180  ldrb $nhi,[$Xi,#14]
    213  ldrbpl $Tll,[$Xi,$cnt]
    [all …]
|
/external/openssl/crypto/modes/asm/ |
D | ghash-x86_64.pl |
    64  $Xi="%rdi";
    173  movzb 15($Xi),$Zlo
    176  &loop ($Xi);
    178  mov $Zlo,8($Xi)
    179  mov $Zhi,($Xi)
    241  &mov ($Zlo,"8($Xi)");
    242  &mov ($Zhi,"0($Xi)");
    252  &mov ("($Xi)",$Zhi);
    253  &mov ("8($Xi)","%rdx");
    288  &mov ($dat,"$j($Xi)") if (--$j%4==0);
    [all …]
|
D | ghash-x86.pl |
    818  ($Xi,$Xhi)=("xmm0","xmm1"); $Hkey="xmm2";
    825  my ($Xhi,$Xi,$Hkey)=@_;
    827  &movdqa ($Xhi,$Xi); #
    828  &pshufd ($T1,$Xi,0b01001110);
    830  &pxor ($T1,$Xi); #
    833  &pclmulqdq ($Xi,$Hkey,0x00); #######
    836  &xorps ($T1,$Xi); #
    843  &pxor ($Xi,$T2); #
    852  my ($Xhi,$Xi,$Hkey)=@_;
    854  &movdqa ($T1,$Xi); #
    [all …]
|
D | ghash-s390x.pl |
    58  $Xi="%r2"; # argument block
    96  la %r1,0($Xi) # H lies right after Xi in gcm128_context
    108  aghi $Xi,-1
    113  lg $Zlo,8+1($Xi) # Xi
    134  la %r1,0($Xi) # H lies right after Xi in gcm128_context
    147  aghi $Xi,-1
    152  lg $Zlo,8+1($Xi) # Xi
    153  lg $Zhi,0+1($Xi)
    159  stg $Zlo,8+1($Xi)
    160  stg $Zhi,0+1($Xi)
    [all …]
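The "# H lies right after Xi in gcm128_context" comments explain why a single pointer argument suffices on s390x: the key material is reached by fixed offset from the Xi pointer. A sketch of the layout slice being relied on (field order as in OpenSSL's gcm128_context; illustrative only, not the full struct):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;

    /* the slice the asm assumes: H (and then Htable) directly follow Xi */
    struct gcm_state_slice {
        /* ... Yi, EKi, EK0, len precede ... */
        uint8_t Xi[16];    /* running GHASH value, pointed at by $Xi */
        uint8_t H[16];     /* hash subkey, found at Xi + 16 */
        u128 Htable[16];   /* precomputed 4-bit multiplication table */
    };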
|
D | ghash-armv4.pl |
    77  $Xi="r0"; # argument block
    105  str $_,[$Xi,#$i]
    107  str $_,[$Xi,#$i]
    110  strb $_,[$Xi,#$i+3]
    112  strb $Tlh,[$Xi,#$i+2]
    114  strb $Thl,[$Xi,#$i+1]
    115  strb $Thh,[$Xi,#$i]
    158  ldrb $nhi,[$Xi,#15]
    176  ldrb $nhi,[$Xi,#14]
    209  ldrplb $Tll,[$Xi,$cnt]
    [all …]
|
D | ghash-sparcv9.pl |
    64  $Xi="%i0"; # input argument block
    86  ldub [$Xi+15],$xi0
    87  ldub [$Xi+14],$xi1
    136  ldub [$Xi+$cnt],$xi1
    188  stx $Zlo,[$Xi+8]
    190  stx $Zhi,[$Xi]
    208  stx $Zlo,[$Xi+8]
    210  stx $Zhi,[$Xi]
    226  ldub [$Xi+15],$nlo
    238  ldub [$Xi+14],$nlo
    [all …]
|
D | ghash-parisc.pl |
    53  $Xi="%r26"; # argument block
    124  ldb 15($Xi),$nlo
    136  ldb 14($Xi),$nlo
    165  ldbx $cnt($Xi),$nlo
    205  std $Zll,8($Xi)
    206  std $Zhh,0($Xi)
    214  ldb 15($Xi),$nlo
    227  ldb 14($Xi),$nlo
    258  ldbx $cnt($Xi),$nlo
    313  stw $Zll,12($Xi)
    [all …]
|
D | ghashv8-armx.S |
    34  vld1.64 {q9},[r0] @ load Xi
    54  vld1.64 {q0},[r0] @ load [rotated] Xi
    75  veor q3,q3,q0 @ inp^=Xi
    76  veor q9,q9,q10 @ q9 is rotated inp^Xi
    79  .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
    81  .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
    83  .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
    111  vst1.64 {q0},[r0] @ write out Xi
|
D | ghashv8-armx.pl |
    30  $Xi="x0"; # argument block
    78  vld1.64 {$t1},[$Xi] @ load Xi
    90  mov $inp,$Xi
    98  vld1.64 {$Xl},[$Xi] @ load [rotated] Xi
    155  vst1.64 {$Xl},[$Xi] @ write out Xi
|
D | ghash-alpha.pl |
    34  $Xi="a0"; # $16, input argument block
    266  ldq $Xlo,8($Xi)
    267  ldq $Xhi,0($Xi)
    314  stq $Xlo,8($Xi)
    315  stq $Xhi,0($Xi)
    341  ldq $Xhi,0($Xi)
    342  ldq $Xlo,8($Xi)
    427  stq $Xlo,8($Xi)
    428  stq $Xhi,0($Xi)
|
/external/chromium_org/third_party/boringssl/src/crypto/modes/ |
D | gcm.c |
    150  static void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) {  in gcm_gmult_4bit()
    159  nlo = ((const uint8_t *)Xi)[15];  in gcm_gmult_4bit()
    183  nlo = ((const uint8_t *)Xi)[cnt];  in gcm_gmult_4bit()
    202  Xi[0] = BSWAP8(Z.hi);  in gcm_gmult_4bit()
    203  Xi[1] = BSWAP8(Z.lo);  in gcm_gmult_4bit()
    205  uint8_t *p = (uint8_t *)Xi;  in gcm_gmult_4bit()
    217  Xi[0] = Z.hi;  in gcm_gmult_4bit()
    218  Xi[1] = Z.lo;  in gcm_gmult_4bit()
    227  static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,  in gcm_ghash_4bit()
    239  nlo = ((const uint8_t *)Xi)[15];  in gcm_ghash_4bit()
    [all …]
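These fragments are the portable 4-bit table path: Htable[i] caches the product i·H, Xi is consumed a nibble at a time from byte 15 downward, and rem_4bit folds the four bits shifted out of Z back into the top of the word. A compacted restatement in C (the loop structure is simplified from the unrolled original; behavior is intended to match, and the rem_4bit values are the ones used by the OpenSSL/BoringSSL code):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;

    static const uint16_t rem_4bit[16] = {
        0x0000, 0x1C20, 0x3840, 0x2460, 0x7080, 0x6CA0, 0x48C0, 0x54E0,
        0xE100, 0xFD20, 0xD940, 0xC560, 0x9180, 0x8DA0, 0xA9C0, 0xB5E0};

    /* Readable sketch of gcm_gmult_4bit: Xi *= H via 16-entry table. */
    static void gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) {
        const uint8_t *x = (const uint8_t *)Xi;
        u128 Z = {0, 0};

        for (int cnt = 15; cnt >= 0; cnt--) {
            for (int shift = 0; shift <= 4; shift += 4) {
                uint8_t n = (x[cnt] >> shift) & 0xf;  /* low nibble, then high */
                if (!(cnt == 15 && shift == 0)) {
                    /* Z = Z * x^4, folding the dropped bits via rem_4bit */
                    uint8_t rem = Z.lo & 0xf;
                    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
                    Z.hi = (Z.hi >> 4) ^ ((uint64_t)rem_4bit[rem] << 48);
                }
                Z.hi ^= Htable[n].hi;
                Z.lo ^= Htable[n].lo;
            }
        }
        /* store back big-endian, as the real code does with BSWAP8 */
        uint8_t *p = (uint8_t *)Xi;
        for (int i = 0; i < 8; i++) {
            p[i]     = (uint8_t)(Z.hi >> (56 - 8 * i));
            p[8 + i] = (uint8_t)(Z.lo >> (56 - 8 * i));
        }
    }

gcm_ghash_4bit is the same loop fused over many input blocks, XOR-ing each block into Xi before the multiply so the data makes only one trip through memory.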
|
/external/openssl/crypto/modes/ |
D | gcm128.c |
    145  static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])  in gcm_gmult_8bit()
    148  const u8 *xi = (const u8 *)Xi+15;  in gcm_gmult_8bit()
    221  if ((u8 *)Xi==xi) break;  in gcm_gmult_8bit()
    236  Xi[0] = BSWAP8(Z.hi);  in gcm_gmult_8bit()
    237  Xi[1] = BSWAP8(Z.lo);  in gcm_gmult_8bit()
    239  u8 *p = (u8 *)Xi;  in gcm_gmult_8bit()
    248  Xi[0] = Z.hi;  in gcm_gmult_8bit()
    249  Xi[1] = Z.lo;  in gcm_gmult_8bit()
    252  #define GCM_MUL(ctx,Xi) gcm_gmult_8bit(ctx->Xi.u,ctx->Htable)  argument
    335  static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])  in gcm_gmult_4bit()
    [all …]
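Unlike the BoringSSL copy, gcm128.c also carries an 8-bit path: gcm_gmult_8bit walks Xi a byte at a time against an Htable[256] that costs 4KB per key, versus 256 bytes for the 16-entry 4-bit table. Either table is filled from H by repeated multiplication by x in GCM's bit-reflected representation (in the 4-bit case, Htable[8] = H, the other power-of-two indices come from successive one-bit steps, and the remaining entries are XOR combinations). A sketch of that one-bit step, mirroring what OpenSSL's REDUCE1BIT macro does (the function name is mine):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;

    /* Multiply V by x in GCM's bit-reflected GF(2^128): a one-bit right
     * shift, folding the dropped bit back in with the constant 0xE1<<56. */
    static void mul_by_x(u128 *V) {
        uint64_t mask = 0 - (V->lo & 1);   /* all-ones iff the LSB is set */
        V->lo = (V->lo >> 1) | (V->hi << 63);
        V->hi = (V->hi >> 1) ^ (mask & 0xE100000000000000ULL);
    }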
|
/external/openssl/crypto/sha/asm/ |
D | sha1-armv4-large.pl |
    85  $Xi="r14";
    91  ldr $t0,[$Xi,#15*4]
    92  ldr $t1,[$Xi,#13*4]
    93  ldr $t2,[$Xi,#7*4]
    95  ldr $t3,[$Xi,#2*4]
    102  str $t0,[$Xi,#-4]!
    135  str $t0,[$Xi,#-4]!
    191  mov $Xi,sp
    202  teq $Xi,sp
    221  teq $Xi,sp @ preserve carry
    [all …]
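In the SHA-1 ports, $Xi is no longer the GHASH accumulator but a pointer (here r14) or scratch register for the 16-word message schedule kept on the stack. The #15*4/#13*4/#7*4/#2*4 offsets and the str ...,[$Xi,#-4]! pre-decrement writeback implement the schedule update W[t] = ROL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) in place. The same update with a circular buffer in C (names are mine):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, int n) {
        return (x << n) | (x >> (32 - n));
    }

    /* SHA-1 message schedule over a 16-word circular buffer, updated
     * in place: W[t] = ROL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]). */
    static uint32_t xupdate(uint32_t X[16], unsigned t) {
        uint32_t w = X[(t + 13) & 15] ^ X[(t + 8) & 15] ^
                     X[(t + 2) & 15] ^ X[t & 15];
        return X[t & 15] = rol32(w, 1);
    }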
|
D | sha1-sparcv9.pl |
    32  $Xi="%g4";
    54  my $xi=($i&1)?@X[($i/2)%8]:$Xi;
    72  " srlx @X[(($i+1)/2)%8],32,$Xi\n";
    91  sllx @X[($j+6)%8],32,$Xi ! Xupdate($i)
    96  or $tmp1,$Xi,$Xi
    98  xor $Xi,@X[$j%8],@X[$j%8]
    99  srlx @X[$j%8],31,$Xi
    101  and $Xi,$rot1m,$Xi
    104  or $Xi,@X[$j%8],@X[$j%8]
    116  $xi=$Xi;
    [all …]
|
D | sha1-thumb.pl |
    40  $Xi="r12";
    133  mov $Xi,sp
    170  mov $t0,$Xi
    175  mov $Xi,$t1
    181  cmp $Xi,$t0
    187  mov $Xi,$t1
    193  cmp $Xi,$t0
    201  mov $Xi,$t1
    207  cmp $Xi,$t0
    211  mov $Xi,sp
|
D | sha1-586.pl |
    415  my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded
    539  &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
    558  &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
    587  &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer
    595  &movdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX
    600  &movdqa (@X[1],@X[-2&7]) if ($Xi<7);
    606  $Xi++; push(@X,shift(@X)); # "rotate" X[]
    615  &movdqa (@X[2],@X[-1&7]) if ($Xi==8);
    624  &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
    627  if ($Xi%5) {
    [all …]
|
D | sha1-x86_64.pl |
    295  my $Xi=4;
    403  &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
    439  &movdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
    447  $Xi++; push(@X,shift(@X)); # "rotate" X[]
    457  &movdqa (@Tx[0],@X[-1&7]) if ($Xi==8);
    468  if ($Xi%5) {
    471  &movdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
    484  &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
    504  &movdqa (@Tx[1],@X[0]) if ($Xi<19);
    514  $Xi++; push(@X,shift(@X)); # "rotate" X[]
    [all …]
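In these SIMD generators $Xi is a Perl-side counter for the 4-wide Xupdate rounds, not a machine register: each vector step produces four W[t]+K words that are spilled to the stack ("X[]+K xfer to IALU") for the scalar round code to consume, and "$Xi%5" tracks which round constant K_XX_XX applies. For reference, the scalar round being fed, in C (names are mine; wk is the precomputed W[t]+K[t]):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, int n) {
        return (x << n) | (x >> (32 - n));
    }

    /* One SHA-1 round on state s[5]; t selects the round function. */
    static void sha1_round(uint32_t s[5], uint32_t wk, int t) {
        uint32_t b = s[1], c = s[2], d = s[3], f;
        if (t < 20)      f = (b & c) | (~b & d);          /* Ch     */
        else if (t < 40) f = b ^ c ^ d;                   /* Parity */
        else if (t < 60) f = (b & c) | (b & d) | (c & d); /* Maj    */
        else             f = b ^ c ^ d;                   /* Parity */
        uint32_t tmp = rol32(s[0], 5) + f + s[4] + wk;
        s[4] = d; s[3] = c; s[2] = rol32(b, 30); s[1] = s[0]; s[0] = tmp;
    }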
|
D | sha1-sparcv9a.pl |
    58  $Xi="%o7";
    152  ld [$Xfer+`4*($i%16)`],$Xi
    160  add $Xi,$e,$e
    171  ld [$Xfer+`4*($i%16)`],$Xi
    180  add $Xi,$e,$e
    204  ld [$Xfer+`4*($i%16)`],$Xi
    217  add $Xi,$e,$e
    222  ld [$Xfer+`4*($i%16)`],$Xi
    237  add $Xi,$e,$e
    242  ld [$Xfer+`4*($i%16)`],$Xi
    [all …]
|
/external/chromium_org/third_party/boringssl/src/crypto/sha/asm/ |
D | sha1-armv4-large.pl |
    71  $Xi="r14";
    77  ldr $t0,[$Xi,#15*4]
    78  ldr $t1,[$Xi,#13*4]
    79  ldr $t2,[$Xi,#7*4]
    81  ldr $t3,[$Xi,#2*4]
    88  str $t0,[$Xi,#-4]!
    121  str $t0,[$Xi,#-4]!
    169  mov $Xi,sp
    180  teq $Xi,sp
    199  teq $Xi,sp @ preserve carry
    [all …]
|
D | sha1-x86_64.pl |
    447  my $Xi=4;
    577  &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
    612  &movdqa (@Tx[2],eval(2*16*(($Xi)/5)-64)."($K_XX_XX)"); # K_XX_XX
    618  &pshufd (@Tx[1],@X[-1&7],0xee) if ($Xi==7); # was &movdqa (@Tx[0],@X[-1&7]) in Xupdate_ssse3_32_79
    622  $Xi++; push(@X,shift(@X)); # "rotate" X[]
    632  eval(shift(@insns)) if ($Xi==8);
    634  eval(shift(@insns)) if ($Xi==8);
    646  if ($Xi%5) {
    649  &movdqa (@Tx[2],eval(2*16*($Xi/5)-64)."($K_XX_XX)");
    665  &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
    [all …]
|
D | sha1-586.pl |
    549  my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded
    678  &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
    696  &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
    725  &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer
    733  &movdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX
    738  &pshufd (@X[1],@X[-3&7],0xee) if ($Xi<7); # was &movdqa (@X[1],@X[-2&7])
    739  &pshufd (@X[3],@X[-1&7],0xee) if ($Xi==7);
    745  $Xi++; push(@X,shift(@X)); # "rotate" X[]
    762  &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
    766  if ($Xi%5) {
    [all …]
|
/external/openssl/crypto/aes/asm/ |
D | aesni-sha1-x86_64.pl |
    110  my $Xi=4;
    285  &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
    321  &movdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
    329  $Xi++; push(@X,shift(@X)); # "rotate" X[]
    339  &movdqa (@Tx[0],@X[-1&7]) if ($Xi==8);
    350  if ($Xi%5) {
    353  &movdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
    366  &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
    386  &movdqa (@Tx[1],@X[0]) if ($Xi<19);
    396  $Xi++; push(@X,shift(@X)); # "rotate" X[]
    [all …]
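aesni-sha1-x86_64.pl "stitches" AES-NI CBC encryption into the same SHA-1 round schedule seen above, so the AES unit and the integer ALUs run concurrently. A toy C illustration of the idea only, nowhere near the real instruction scheduling (all names are mine; assumes an already-expanded AES-128 key schedule rk[0..10]):

    #include <stdint.h>
    #include <wmmintrin.h>   /* AES-NI intrinsics */

    static inline uint32_t rol32(uint32_t x, int n) {
        return (x << n) | (x >> (32 - n));
    }

    /* one SHA-1 round of the 0..19 flavour, enough to show the interleave */
    static inline void sha1_round_ch(uint32_t s[5], uint32_t w) {
        uint32_t t = rol32(s[0], 5) + ((s[1] & s[2]) | (~s[1] & s[3]))
                   + s[4] + 0x5A827999u + w;
        s[4] = s[3]; s[3] = s[2]; s[2] = rol32(s[1], 30); s[1] = s[0]; s[0] = t;
    }

    /* toy stitched kernel: encrypt one AES-128 CBC block while retiring
     * one SHA-1 round per AES round, overlapping the two execution units */
    static __m128i cbc_block_stitched(__m128i iv, __m128i pt,
                                      const __m128i rk[11],
                                      uint32_t st[5], const uint32_t W[9]) {
        __m128i x = _mm_xor_si128(_mm_xor_si128(pt, iv), rk[0]);
        for (int r = 1; r < 10; ++r) {
            x = _mm_aesenc_si128(x, rk[r]);  /* AES unit busy ...        */
            sha1_round_ch(st, W[r - 1]);     /* ... ALU hashes meanwhile */
        }
        return _mm_aesenclast_si128(x, rk[10]);
    }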
|