Searched refs:Xi (Results 1 – 25 of 102) sorted by relevance

/external/boringssl/src/crypto/fipsmodule/modes/asm/
ghash-x86_64.pl
133 ($Xi,$Xhi)=("%xmm0","%xmm1"); $Hkey="%xmm2";
137 my ($Xhi,$Xi,$Hkey,$HK)=@_;
141 movdqa $Xi,$Xhi #
142 pshufd \$0b01001110,$Xi,$T1
144 pxor $Xi,$T1 #
149 movdqa $Xi,$Xhi #
150 pshufd \$0b01001110,$Xi,$T1
151 pxor $Xi,$T1 #
155 pclmulqdq \$0x00,$Hkey,$Xi #######
158 pxor $Xi,$T1 #
[all …]
ghash-x86.pl
160 ($Xi,$Xhi)=("xmm0","xmm1"); $Hkey="xmm2";
167 my ($Xhi,$Xi,$Hkey,$HK)=@_;
169 &movdqa ($Xhi,$Xi); #
170 &pshufd ($T1,$Xi,0b01001110);
172 &pxor ($T1,$Xi); #
176 &pclmulqdq ($Xi,$Hkey,0x00); #######
179 &xorps ($T1,$Xi); #
186 &pxor ($Xi,$T2); #
195 my ($Xhi,$Xi,$Hkey)=@_;
197 &movdqa ($T1,$Xi); #
[all …]
aesni-gcm-x86_64.pl
77 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));
152 vpxor $Z0,$Xi,$Xi # modulo-scheduled
163 vpxor 16+8(%rsp),$Xi,$Xi # modulo-scheduled [vpxor $Z3,$Xi,$Xi]
227 vpxor 0x70+8(%rsp),$Xi,$Xi # accumulate I[0]
241 vpclmulqdq \$0x10,$Hkey,$Xi,$Z1
244 vpclmulqdq \$0x01,$Hkey,$Xi,$T1
248 vpclmulqdq \$0x00,$Hkey,$Xi,$T2
251 vpclmulqdq \$0x11,$Hkey,$Xi,$Xi
266 vpxor $Xi,$Z3,$Z3
356 vmovdqu $Z3,16+8(%rsp) # postpone vpxor $Z3,$Xi,$Xi
[all …]
ghash-ssse3-x86.pl
80 my ($Xi, $Htable, $in, $len) = ("edi", "esi", "edx", "ecx");
157 &mov($Xi, &wparam(0));
160 &movdqu("xmm0", &QWP(0, $Xi));
190 &movdqu(&QWP(0, $Xi), "xmm2");
208 &mov($Xi, &wparam(0));
213 &movdqu("xmm0", &QWP(0, $Xi));
266 &movdqu(&QWP(0, $Xi), "xmm0");
/external/rust/crates/ring/crypto/fipsmodule/modes/asm/
ghash-x86_64.pl
133 ($Xi,$Xhi)=("%xmm0","%xmm1"); $Hkey="%xmm2";
137 my ($Xhi,$Xi,$Hkey,$HK)=@_;
141 movdqa $Xi,$Xhi #
142 pshufd \$0b01001110,$Xi,$T1
144 pxor $Xi,$T1 #
149 movdqa $Xi,$Xhi #
150 pshufd \$0b01001110,$Xi,$T1
151 pxor $Xi,$T1 #
155 pclmulqdq \$0x00,$Hkey,$Xi #######
158 pxor $Xi,$T1 #
[all …]
ghash-x86.pl
156 ($Xi,$Xhi)=("xmm0","xmm1"); $Hkey="xmm2";
163 my ($Xhi,$Xi,$Hkey,$HK)=@_;
165 &movdqa ($Xhi,$Xi); #
166 &pshufd ($T1,$Xi,0b01001110);
168 &pxor ($T1,$Xi); #
172 &pclmulqdq ($Xi,$Hkey,0x00); #######
175 &xorps ($T1,$Xi); #
182 &pxor ($Xi,$T2); #
191 my ($Xhi,$Xi,$Hkey)=@_;
193 &movdqa ($T1,$Xi); #
[all …]
aesni-gcm-x86_64.pl
77 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));
152 vpxor $Z0,$Xi,$Xi # modulo-scheduled
163 vpxor 16+8(%rsp),$Xi,$Xi # modulo-scheduled [vpxor $Z3,$Xi,$Xi]
227 vpxor 0x70+8(%rsp),$Xi,$Xi # accumulate I[0]
241 vpclmulqdq \$0x10,$Hkey,$Xi,$Z1
244 vpclmulqdq \$0x01,$Hkey,$Xi,$T1
248 vpclmulqdq \$0x00,$Hkey,$Xi,$T2
251 vpclmulqdq \$0x11,$Hkey,$Xi,$Xi
266 vpxor $Xi,$Z3,$Z3
356 vmovdqu $Z3,16+8(%rsp) # postpone vpxor $Z3,$Xi,$Xi
[all …]
/external/llvm-project/clang/test/Analysis/
placement-new.cpp
187 } Xi; // expected-note {{'Xi' initialized here}} in f1() local
190 …::new (&Xi.a) long; // expected-warning{{Storage type is aligned to 1 bytes but allocated type is … in f1()
198 } Xi; in f2() local
201 ::new (&Xi.a) long; in f2()
209 } Xi; // expected-note {{'Xi' initialized here}} in f3() local
212 …::new (&Xi.b) long; // expected-warning{{Storage type is aligned to 1 bytes but allocated type is … in f3()
223 } Xi; // expected-note {{'Xi' initialized here}} in f4() local
226 …::new (&Xi.y.b) long; // expected-warning{{Storage type is aligned to 2 bytes but allocated type i… in f4()
260 } Xi; // expected-note {{'Xi' initialized here}} in f9() local
263 ::new (&Xi.b[8]) long; in f9()
[all …]
/external/rust/crates/quiche/deps/boringssl/src/crypto/fipsmodule/modes/
internal.h
140 typedef void (*gmult_func)(uint64_t Xi[2], const u128 Htable[16]);
145 typedef void (*ghash_func)(uint64_t Xi[2], const u128 Htable[16],
175 } Yi, EKi, EK0, len, Xi; member
265 void gcm_gmult_nohw(uint64_t Xi[2], const u128 Htable[16]);
266 void gcm_ghash_nohw(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
273 void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
274 void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
275 void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
284 void gcm_init_ssse3(u128 Htable[16], const uint64_t Xi[2]);
285 void gcm_gmult_ssse3(uint64_t Xi[2], const u128 Htable[16]);
[all …]
gcm.c
66 #define GCM_MUL(ctx, Xi) gcm_gmult_nohw((ctx)->Xi.u, (ctx)->gcm_key.Htable) argument
68 gcm_ghash_nohw((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
130 #define GCM_MUL(ctx, Xi) (*gcm_gmult_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable) argument
133 (*gcm_ghash_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
234 void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = in CRYPTO_gcm128_setiv()
240 ctx->Xi.u[0] = 0; in CRYPTO_gcm128_setiv()
241 ctx->Xi.u[1] = 0; in CRYPTO_gcm128_setiv()
283 void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = in CRYPTO_gcm128_aad()
285 void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, in CRYPTO_gcm128_aad()
302 ctx->Xi.c[n] ^= *(aad++); in CRYPTO_gcm128_aad()
[all …]
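
For context, the GCM_MUL/GHASH macros and the gmult_func/ghash_func typedefs matched above all act on the same Xi accumulator in the GCM context: each 16-byte block of AAD or ciphertext is XORed into Xi, which is then multiplied by the hash key H (via the precomputed Htable) in GF(2^128). Below is a minimal sketch of that per-block update, assuming only the typedef shown in internal.h; ghash_update_block is a hypothetical helper name for illustration, not BoringSSL's actual code.

#include <stdint.h>
#include <string.h>

/* Stand-ins mirroring the declarations matched in internal.h above. */
typedef struct { uint64_t hi, lo; } u128;
typedef void (*gmult_func)(uint64_t Xi[2], const u128 Htable[16]);

/* Sketch of what GCM_MUL(ctx, Xi) amounts to per block: fold one 16-byte
 * block into the running GHASH state Xi, then multiply Xi by H through the
 * supplied gmult implementation. */
static void ghash_update_block(uint64_t Xi[2], const u128 Htable[16],
                               const uint8_t block[16], gmult_func gmult) {
  uint64_t t[2];
  memcpy(t, block, 16);   /* XOR is bytewise, so word order does not matter here */
  Xi[0] ^= t[0];
  Xi[1] ^= t[1];
  gmult(Xi, Htable);      /* Xi = Xi * H in GF(2^128); byte order handled inside */
}

The byte-wise path matched in CRYPTO_gcm128_aad (ctx->Xi.c[n] ^= *(aad++)) handles partial blocks the same way, deferring the multiply until a full block has accumulated.
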
gcm_nohw.c
195 void gcm_init_nohw(u128 Htable[16], const uint64_t Xi[2]) { in gcm_init_nohw()
206 Htable[0].lo = Xi[1]; in gcm_init_nohw()
207 Htable[0].hi = Xi[0]; in gcm_init_nohw()
224 static void gcm_polyval_nohw(uint64_t Xi[2], const u128 *H) { in gcm_polyval_nohw()
229 gcm_mul64_nohw(&r0, &r1, Xi[0], H->lo); in gcm_polyval_nohw()
231 gcm_mul64_nohw(&r2, &r3, Xi[1], H->hi); in gcm_polyval_nohw()
233 gcm_mul64_nohw(&mid0, &mid1, Xi[0] ^ Xi[1], H->hi ^ H->lo); in gcm_polyval_nohw()
273 Xi[0] = r2; in gcm_polyval_nohw()
274 Xi[1] = r3; in gcm_polyval_nohw()
277 void gcm_gmult_nohw(uint64_t Xi[2], const u128 Htable[16]) { in gcm_gmult_nohw()
[all …]
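
In the gcm_polyval_nohw hits above, the 128x128-bit carry-less multiply is done with Karatsuba: three 64x64-bit products (the gcm_mul64_nohw calls at lines 229, 231, and 233) instead of four. Writing each operand as two 64-bit halves, X = X1·z^64 ⊕ X0 and H = H1·z^64 ⊕ H0, with ⊕ denoting XOR (addition in GF(2)[z]), the identity being used is

    X·H = (X1·H1)·z^128 ⊕ ((X0⊕X1)·(H0⊕H1) ⊕ X1·H1 ⊕ X0·H0)·z^64 ⊕ X0·H0

The cross term (Xi[0] ^ Xi[1] paired with H->hi ^ H->lo) supplies the middle word; the lines of gcm_polyval_nohw not matched here recombine the halves and reduce the 256-bit product modulo the GHASH polynomial.
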
/external/boringssl/src/crypto/fipsmodule/modes/
internal.h
140 typedef void (*gmult_func)(uint64_t Xi[2], const u128 Htable[16]);
145 typedef void (*ghash_func)(uint64_t Xi[2], const u128 Htable[16],
175 } Yi, EKi, EK0, len, Xi; member
265 void gcm_gmult_nohw(uint64_t Xi[2], const u128 Htable[16]);
266 void gcm_ghash_nohw(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
273 void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
274 void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
275 void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
284 void gcm_init_ssse3(u128 Htable[16], const uint64_t Xi[2]);
285 void gcm_gmult_ssse3(uint64_t Xi[2], const u128 Htable[16]);
[all …]
gcm.c
66 #define GCM_MUL(ctx, Xi) gcm_gmult_nohw((ctx)->Xi.u, (ctx)->gcm_key.Htable) argument
68 gcm_ghash_nohw((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
130 #define GCM_MUL(ctx, Xi) (*gcm_gmult_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable) argument
133 (*gcm_ghash_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
234 void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = in CRYPTO_gcm128_setiv()
240 ctx->Xi.u[0] = 0; in CRYPTO_gcm128_setiv()
241 ctx->Xi.u[1] = 0; in CRYPTO_gcm128_setiv()
283 void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = in CRYPTO_gcm128_aad()
285 void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, in CRYPTO_gcm128_aad()
302 ctx->Xi.c[n] ^= *(aad++); in CRYPTO_gcm128_aad()
[all …]
gcm_nohw.c
195 void gcm_init_nohw(u128 Htable[16], const uint64_t Xi[2]) { in gcm_init_nohw()
206 Htable[0].lo = Xi[1]; in gcm_init_nohw()
207 Htable[0].hi = Xi[0]; in gcm_init_nohw()
224 static void gcm_polyval_nohw(uint64_t Xi[2], const u128 *H) { in gcm_polyval_nohw()
229 gcm_mul64_nohw(&r0, &r1, Xi[0], H->lo); in gcm_polyval_nohw()
231 gcm_mul64_nohw(&r2, &r3, Xi[1], H->hi); in gcm_polyval_nohw()
233 gcm_mul64_nohw(&mid0, &mid1, Xi[0] ^ Xi[1], H->hi ^ H->lo); in gcm_polyval_nohw()
273 Xi[0] = r2; in gcm_polyval_nohw()
274 Xi[1] = r3; in gcm_polyval_nohw()
277 void gcm_gmult_nohw(uint64_t Xi[2], const u128 Htable[16]) { in gcm_gmult_nohw()
[all …]
/external/rust/crates/ring/src/aead/
gcm.rs
87 Xi: Xi(Block::zero()), in new()
117 let xi = &mut self.inner.Xi; in update_blocks()
125 xi: &mut Xi, in update_blocks() argument
145 xi: &mut Xi, in update_blocks() argument
160 xi: &mut Xi, in update_blocks() argument
179 self.inner.Xi.bitxor_assign(a); in update_block()
184 let xi = &mut self.inner.Xi; in update_block()
196 fn GFp_gcm_gmult_clmul(xi: &mut Xi, Htable: &HTable); in update_block() argument
206 fn GFp_gcm_gmult_neon(xi: &mut Xi, Htable: &HTable); in update_block() argument
222 F: FnOnce(Xi) -> super::Tag, in pre_finish()
[all …]
/external/boringssl/src/crypto/fipsmodule/sha/asm/
sha1-armv4-large.pl
108 $Xi="r14";
114 ldr $t0,[$Xi,#15*4]
115 ldr $t1,[$Xi,#13*4]
116 ldr $t2,[$Xi,#7*4]
118 ldr $t3,[$Xi,#2*4]
125 str $t0,[$Xi,#-4]!
158 str $t0,[$Xi,#-4]!
223 mov $Xi,sp
236 teq $Xi,$t3
238 teq $Xi,sp
[all …]
sha1-x86_64.pl
462 my $Xi=4;
599 &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
634 &movdqa (@Tx[2],eval(2*16*(($Xi)/5)-64)."($K_XX_XX)"); # K_XX_XX
640 &pshufd (@Tx[1],@X[-1&7],0xee) if ($Xi==7); # was &movdqa (@Tx[0],@X[-1&7]) in Xupdate_ssse3_32_79
644 $Xi++; push(@X,shift(@X)); # "rotate" X[]
654 eval(shift(@insns)) if ($Xi==8);
656 eval(shift(@insns)) if ($Xi==8);
668 if ($Xi%5) {
671 &movdqa (@Tx[2],eval(2*16*($Xi/5)-64)."($K_XX_XX)");
687 &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
[all …]
sha1-586.pl
560 my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded
689 &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
707 &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
736 &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer
744 &movdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX
749 &pshufd (@X[1],@X[-3&7],0xee) if ($Xi<7); # was &movdqa (@X[1],@X[-2&7])
750 &pshufd (@X[3],@X[-1&7],0xee) if ($Xi==7);
756 $Xi++; push(@X,shift(@X)); # "rotate" X[]
773 &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
777 if ($Xi%5) {
[all …]
/external/rust/crates/ring/pregenerated/
ghashv8-armx-ios32.S
80 vld1.64 {q9},[r0] @ load Xi
89 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
91 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
92 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
113 vst1.64 {q0},[r0] @ write out Xi
126 vld1.64 {q0},[r0] @ load [rotated] Xi
146 vext.8 q0,q0,q0,#8 @ rotate Xi
160 veor q3,q3,q0 @ I[i]^=Xi
170 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
175 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
[all …]
ghashv8-armx-linux32.S
77 vld1.64 {q9},[r0] @ load Xi
86 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
88 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
89 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
110 vst1.64 {q0},[r0] @ write out Xi
121 vld1.64 {q0},[r0] @ load [rotated] Xi
141 vext.8 q0,q0,q0,#8 @ rotate Xi
155 veor q3,q3,q0 @ I[i]^=Xi
165 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
170 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
[all …]
/external/openscreen/third_party/boringssl/ios-arm/crypto/fipsmodule/
ghashv8-armx32.S
83 vld1.64 {q9},[r0] @ load Xi
92 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
94 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
95 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
116 vst1.64 {q0},[r0] @ write out Xi
129 vld1.64 {q0},[r0] @ load [rotated] Xi
149 vext.8 q0,q0,q0,#8 @ rotate Xi
163 veor q3,q3,q0 @ I[i]^=Xi
173 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
178 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
[all …]
/external/rust/crates/quiche/deps/boringssl/ios-arm/crypto/fipsmodule/
ghashv8-armx32.S
81 vld1.64 {q9},[r0] @ load Xi
90 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
92 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
93 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
114 vst1.64 {q0},[r0] @ write out Xi
126 vld1.64 {q0},[r0] @ load [rotated] Xi
146 vext.8 q0,q0,q0,#8 @ rotate Xi
160 veor q3,q3,q0 @ I[i]^=Xi
170 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
175 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
[all …]
/external/boringssl/ios-arm/crypto/fipsmodule/
ghashv8-armx32.S
83 vld1.64 {q9},[r0] @ load Xi
92 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
94 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
95 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
116 vst1.64 {q0},[r0] @ write out Xi
129 vld1.64 {q0},[r0] @ load [rotated] Xi
149 vext.8 q0,q0,q0,#8 @ rotate Xi
163 veor q3,q3,q0 @ I[i]^=Xi
173 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
178 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
[all …]
/external/boringssl/linux-arm/crypto/fipsmodule/
ghashv8-armx32.S
80 vld1.64 {q9},[r0] @ load Xi
89 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
91 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
92 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
113 vst1.64 {q0},[r0] @ write out Xi
124 vld1.64 {q0},[r0] @ load [rotated] Xi
144 vext.8 q0,q0,q0,#8 @ rotate Xi
158 veor q3,q3,q0 @ I[i]^=Xi
168 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
173 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
[all …]
/external/openscreen/third_party/boringssl/linux-arm/crypto/fipsmodule/
ghashv8-armx32.S
80 vld1.64 {q9},[r0] @ load Xi
89 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
91 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
92 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
113 vst1.64 {q0},[r0] @ write out Xi
124 vld1.64 {q0},[r0] @ load [rotated] Xi
144 vext.8 q0,q0,q0,#8 @ rotate Xi
158 veor q3,q3,q0 @ I[i]^=Xi
168 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
173 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
[all …]
