
Searched refs: H4 (results 1–25 of 53), sorted by relevance


/third_party/skia/resources/sksl/errors/
BinaryTypeCoercion.sksl
3 half4 H4 = half4(1.0);
15 void vector_times_vector_ok_1() { F4 = F4 * H4; }
16 void vector_times_vector_ok_2() { F4 = H4 * F4; }
17 void vector_times_vector_ok_3() { F4 *= H4; }
18 void vector_times_vector_disallowed_1() { H4 = F4 * H4; }
19 void vector_times_vector_disallowed_2() { H4 = H4 * F4; }
20 void vector_times_vector_disallowed_3() { H4 *= F4; }
22 void scalar_times_vector_ok_1() { F4 = F * H4; }
24 void scalar_times_vector_disallowed_1() { H4 = F * H4; }
25 void scalar_times_vector_disallowed_2() { H4 = H * F4; }
[all …]
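
The SkSL hits above encode a binary type-coercion rule: mixing float4 and half4 in a binary expression yields the full-precision float4 result, which may be assigned to F4 but not implicitly narrowed back to H4. A rough model of that rule follows (illustrative Python, not Skia's implementation; the precision ranking and helper names are assumptions drawn from the test's ok/disallowed cases):

    PRECISION = {"half4": 0, "float4": 1}  # relative precision rank (assumed)

    def binary_result_type(lhs: str, rhs: str) -> str:
        # a mixed-precision binary expression takes the wider operand type
        return lhs if PRECISION[lhs] >= PRECISION[rhs] else rhs

    def assignment_ok(target: str, value: str) -> bool:
        # implicit narrowing (float4 -> half4) is rejected; widening is fine
        return PRECISION[target] >= PRECISION[value]

    assert assignment_ok("float4", binary_result_type("float4", "half4"))     # F4 = F4 * H4: ok
    assert not assignment_ok("half4", binary_result_type("float4", "half4"))  # H4 = F4 * H4: error
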
/third_party/openssl/crypto/poly1305/asm/
poly1305-c64xplus.pl
32 ($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
142 LDW *${CTXA}[4],$H4 ; load h4
170 || ADD $PADBIT,$H4,$H4 ; h4+=padbit
173 || ADD $D3,$H4,$H4
191 MPY32 $H4,$S1,B20
192 || MPY32 $H4,$S2a,A20
198 MPY32 $H4,$S3b,B22
205 MPY32 $H4,$R0b,$H4
231 ADD B31,$H4,$H4
234 SHRU $H4,2,B16 ; last reduction step
[all …]
poly1305-x86_64.pl
344 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
639 vmovd $h2#d,$H4
742 vmovd $h2#d,$H4
776 vmovd 4*4($ctx),$H4
924 vmovdqa $H4,0x40(%r11) #
929 vmovdqa 0x40(%rsp),$H4 # s2^2
936 vpmuludq $T4,$H4,$H0 # h4*s2
937 vpmuludq $T3,$H4,$H4 # h3*s2
940 vpaddq $H4,$D0,$D0 # d0 += h3*s2
942 vmovdqa 0x80(%rsp),$H4 # s4^2
[all …]
poly1305-s390x.pl
225 my ($H0, $H1, $H2, $H3, $H4) = map("%v$_",(0..4));
313 vmalof ($ACC0,$H4,$S1,$ACC0);
314 vmalof ($ACC1,$H4,$S2,$ACC1);
315 vmalof ($ACC2,$H4,$S3,$ACC2);
316 vmalof ($ACC3,$H4,$S4,$ACC3);
317 vmalof ($ACC4,$H4,$R0,$ACC4);
322 vesrlg ($H4,$ACC3,26);
326 vag ($H4,$H4,$ACC4); # h3 -> h4
329 vesrlg ($ACC4,$H4,26);
331 vn ($H4,$H4,$mask26);
[all …]
poly1305-armv4.pl
444 my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
562 @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
564 @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
570 @ one has to watch for H2 (which is narrower than H0) and 5*H4
748 vmov.32 $H4#lo[0],$padbit
758 vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26
763 vadd.i32 $H4#hi,$H4#lo,$D4#lo @ add hash value and move to #hi
792 vmov.i32 $H4,#1<<24 @ padbit, yes, always
807 vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
863 vadd.i32 $H4#lo,$H4#lo,$D4#lo
[all …]
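
The "vsri.u32 ... base 2^32 -> base 2^26" lines above resplit the accumulator, held as 32-bit words, into five 26-bit limbs so that limb products fit comfortably in 64-bit lanes. A minimal pure-Python sketch of that radix change (the word count and layout are simplified; this is not the NEON register shuffling):

    MASK26 = (1 << 26) - 1

    def base2_32_to_2_26(words):
        """Resplit little-endian 32-bit words into five 26-bit limbs."""
        x = sum(w << (32 * i) for i, w in enumerate(words))
        return [(x >> (26 * i)) & MASK26 for i in range(5)]

    # 2^128 - 1 becomes four full 26-bit limbs plus one 24-bit top limb
    limbs = base2_32_to_2_26([0xffffffff] * 4)
    assert sum(l << (26 * i) for i, l in enumerate(limbs)) == (1 << 128) - 1
    assert all(l <= MASK26 for l in limbs)
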
poly1305-ppc.pl
765 my ($H0, $H1, $H2, $H3, $H4) = map("v$_",(0..4));
1003 mtvrwz $H4,$d2
1061 vmulouw $ACC4,$H4,$R0
1063 vmulouw $T0,$H4,$S1
1076 vmulouw $T0,$H4,$S2
1089 vmulouw $T0,$H4,$S3
1102 vmulouw $T0,$H4,$S4
1111 vsrd $H4,$ACC3,$_26
1115 vaddudm $H4,$H4,$ACC4 # h3 -> h4
1118 vsrd $ACC4,$H4,$_26
[all …]
poly1305-armv8.pl
219 my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
411 fmov ${H4},x14
460 fmov ${H4},x14
662 add $IN01_4,$IN01_4,$H4
702 xtn $H4,$ACC4
705 bic $H4,#0xfc,lsl#24
723 add $H4,$H4,$T1.2s // h3 -> h4
741 add $IN23_4,$IN01_4,$H4
804 add $IN01_4,$IN01_4,$H4
/third_party/node/deps/openssl/openssl/crypto/poly1305/asm/
poly1305-c64xplus.pl
32 ($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
142 LDW *${CTXA}[4],$H4 ; load h4
170 || ADD $PADBIT,$H4,$H4 ; h4+=padbit
173 || ADD $D3,$H4,$H4
191 MPY32 $H4,$S1,B20
192 || MPY32 $H4,$S2a,A20
198 MPY32 $H4,$S3b,B22
205 MPY32 $H4,$R0b,$H4
231 ADD B31,$H4,$H4
234 SHRU $H4,2,B16 ; last reduction step
[all …]
poly1305-x86_64.pl
344 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
639 vmovd $h2#d,$H4
742 vmovd $h2#d,$H4
776 vmovd 4*4($ctx),$H4
924 vmovdqa $H4,0x40(%r11) #
929 vmovdqa 0x40(%rsp),$H4 # s2^2
936 vpmuludq $T4,$H4,$H0 # h4*s2
937 vpmuludq $T3,$H4,$H4 # h3*s2
940 vpaddq $H4,$D0,$D0 # d0 += h3*s2
942 vmovdqa 0x80(%rsp),$H4 # s4^2
[all …]
poly1305-s390x.pl
225 my ($H0, $H1, $H2, $H3, $H4) = map("%v$_",(0..4));
313 vmalof ($ACC0,$H4,$S1,$ACC0);
314 vmalof ($ACC1,$H4,$S2,$ACC1);
315 vmalof ($ACC2,$H4,$S3,$ACC2);
316 vmalof ($ACC3,$H4,$S4,$ACC3);
317 vmalof ($ACC4,$H4,$R0,$ACC4);
322 vesrlg ($H4,$ACC3,26);
326 vag ($H4,$H4,$ACC4); # h3 -> h4
329 vesrlg ($ACC4,$H4,26);
331 vn ($H4,$H4,$mask26);
[all …]
poly1305-armv4.pl
444 my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
562 @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
564 @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
570 @ one has to watch for H2 (which is narrower than H0) and 5*H4
748 vmov.32 $H4#lo[0],$padbit
758 vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26
763 vadd.i32 $H4#hi,$H4#lo,$D4#lo @ add hash value and move to #hi
792 vmov.i32 $H4,#1<<24 @ padbit, yes, always
807 vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
863 vadd.i32 $H4#lo,$H4#lo,$D4#lo
[all …]
poly1305-ppc.pl
765 my ($H0, $H1, $H2, $H3, $H4) = map("v$_",(0..4));
1003 mtvrwz $H4,$d2
1061 vmulouw $ACC4,$H4,$R0
1063 vmulouw $T0,$H4,$S1
1076 vmulouw $T0,$H4,$S2
1089 vmulouw $T0,$H4,$S3
1102 vmulouw $T0,$H4,$S4
1111 vsrd $H4,$ACC3,$_26
1115 vaddudm $H4,$H4,$ACC4 # h3 -> h4
1118 vsrd $ACC4,$H4,$_26
[all …]
poly1305-armv8.pl
219 my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
411 fmov ${H4},x14
460 fmov ${H4},x14
662 add $IN01_4,$IN01_4,$H4
702 xtn $H4,$ACC4
705 bic $H4,#0xfc,lsl#24
723 add $H4,$H4,$T1.2s // h3 -> h4
741 add $IN23_4,$IN01_4,$H4
804 add $IN01_4,$IN01_4,$H4
/third_party/glslang/Test/
spv.builtin.PrimitiveShadingRateEXT.vert
16 // V2 | H4 => 9
24 // V4 | H4 => 10
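
The "V2 | H4 => 9" and "V4 | H4 => 10" comments above are ORs of the GL_EXT_fragment_shading_rate flag constants. The values below are taken from that extension's GLSL spec; treat them as assumptions if your headers differ:

    # gl_ShadingRateFlag*EXT constants from GL_EXT_fragment_shading_rate (assumed values)
    SHADING_RATE_2_VERTICAL_PIXELS   = 1   # gl_ShadingRateFlag2VerticalPixelsEXT
    SHADING_RATE_4_VERTICAL_PIXELS   = 2   # gl_ShadingRateFlag4VerticalPixelsEXT
    SHADING_RATE_2_HORIZONTAL_PIXELS = 4   # gl_ShadingRateFlag2HorizontalPixelsEXT
    SHADING_RATE_4_HORIZONTAL_PIXELS = 8   # gl_ShadingRateFlag4HorizontalPixelsEXT

    assert SHADING_RATE_2_VERTICAL_PIXELS | SHADING_RATE_4_HORIZONTAL_PIXELS == 9    # V2 | H4
    assert SHADING_RATE_4_VERTICAL_PIXELS | SHADING_RATE_4_HORIZONTAL_PIXELS == 10   # V4 | H4
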
/third_party/node/deps/openssl/openssl/crypto/modes/asm/
ghashp8-ppc.pl
380 $Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31));
434 lvx_u $H4, r9,$Htbl
491 vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
531 vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
571 vmr $H4, $H3
594 vmr $H4, $H2
608 vmr $H4, $H
ghashv8-armx.pl
424 $I1,$I2,$I3,$H3,$H34,$H4,$Yl,$Ym,$Yh) = map("q$_",(4..7,15..23));
434 vld1.64 {$H3-$H4},[$Htbl] @ load twisted H^3, ..., H^4
489 vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
491 vpmull2.p64 $Xh,$H4,$IN
544 vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
546 vpmull2.p64 $Xh,$H4,$IN
/third_party/openssl/crypto/modes/asm/
ghashp8-ppc.pl
380 $Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31));
434 lvx_u $H4, r9,$Htbl
491 vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
531 vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
571 vmr $H4, $H3
594 vmr $H4, $H2
608 vmr $H4, $H
ghashv8-armx.pl
424 $I1,$I2,$I3,$H3,$H34,$H4,$Yl,$Ym,$Yh) = map("q$_",(4..7,15..23));
434 vld1.64 {$H3-$H4},[$Htbl] @ load twisted H^3, ..., H^4
489 vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
491 vpmull2.p64 $Xh,$H4,$IN
544 vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
546 vpmull2.p64 $Xh,$H4,$IN
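
The vpmsumd/vpmull.p64 hits above come from the 4x-aggregated GHASH loop: instead of four dependent multiplications by H, the running digest and the four input blocks are folded with the precomputed powers H^4, H^3, H^2 and H. A minimal sketch of that identity (plain polynomial bit order, not GCM's bit-reflected representation, so only the aggregation algebra is demonstrated):

    import random

    POLY = 0x87  # x^7 + x^2 + x + 1; x^128 == POLY modulo the GCM field polynomial

    def gf_mul(a: int, b: int) -> int:
        """Carry-less multiply of two 128-bit values, reduced mod x^128 + x^7 + x^2 + x + 1."""
        acc = 0
        for i in range(128):
            if (b >> i) & 1:
                acc ^= a << i
        for i in range(254, 127, -1):            # fold degrees 254..128 back down
            if (acc >> i) & 1:
                acc ^= (1 << i) | (POLY << (i - 128))
        return acc

    def ghash_serial(y: int, blocks, h: int) -> int:
        for c in blocks:                         # one multiplication by H per block
            y = gf_mul(y ^ c, h)
        return y

    def ghash_4x(y: int, blocks, h: int) -> int:
        c1, c2, c3, c4 = blocks
        h2 = gf_mul(h, h); h3 = gf_mul(h2, h); h4 = gf_mul(h3, h)
        # (Y ^ C1)*H^4 ^ C2*H^3 ^ C3*H^2 ^ C4*H
        return gf_mul(y ^ c1, h4) ^ gf_mul(c2, h3) ^ gf_mul(c3, h2) ^ gf_mul(c4, h)

    h = random.getrandbits(128)
    blocks = [random.getrandbits(128) for _ in range(4)]
    assert ghash_serial(0, blocks, h) == ghash_4x(0, blocks, h)
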
/third_party/parse5/packages/parse5/lib/common/
html.ts
83 H4 = 'h4', enumerator
255 H4, enumerator
413 [TAG_NAMES.H4, TAG_ID.H4],
553 $.H4,
628 return tn === $.H1 || tn === $.H2 || tn === $.H3 || tn === $.H4 || tn === $.H5 || tn === $.H6;
foreign-content.ts
151 $.H4,
/third_party/skia/third_party/externals/brotli/c/enc/
backward_references.c
66 #define HASHER() H4
/third_party/node/deps/brotli/c/enc/
backward_references.c
66 #define HASHER() H4
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
AArch64CallingConvention.cpp
27 AArch64::H3, AArch64::H4, AArch64::H5,
/third_party/node/deps/openssl/config/archs/linux-armv4/asm_avx2/crypto/poly1305/
poly1305-armv4.S
458 @ H0>>+H1>>+H2>>+H3>>+H4
459 @ H3>>+H4>>*5+H0>>+H1
472 @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
479 @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
/third_party/node/deps/openssl/config/archs/linux-armv4/asm/crypto/poly1305/
poly1305-armv4.S
458 @ H0>>+H1>>+H2>>+H3>>+H4
459 @ H3>>+H4>>*5+H0>>+H1
472 @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
479 @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
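
The comments above spell out the per-limb Poly1305 multiplication in base 2^26: H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0*R4, with the limbs that spill past 2^130 picking up a factor of 5 because 2^130 ≡ 5 (mod 2^130 - 5). A minimal pure-Python model of one h = h * r step (this sketches the arithmetic only, not the generated assembly or its instruction scheduling):

    import random

    MASK26 = (1 << 26) - 1

    def limbs26(x: int):
        """Split a value below 2^130 into five 26-bit limbs H0..H4."""
        return [(x >> (26 * i)) & MASK26 for i in range(5)]

    def poly1305_mul(h: int, r: int) -> int:
        H, R = limbs26(h), limbs26(r)
        S = [5 * x for x in R]                    # 5*R limbs absorb the 2^130 -> 5 wrap
        d = [0] * 5
        for i in range(5):
            for j in range(5):
                if i + j < 5:
                    d[i + j] += H[i] * R[j]
                else:
                    d[i + j - 5] += H[i] * S[j]   # limb spills past 2^130: times 5
        carry = 0
        for k in range(5):                        # carry propagation, limb to limb
            d[k] += carry
            carry = d[k] >> 26
            d[k] &= MASK26
        d[0] += 5 * carry                         # fold the top carry back in as 5*carry
        d[1] += d[0] >> 26
        d[0] &= MASK26
        return sum(x << (26 * i) for i, x in enumerate(d))

    p = (1 << 130) - 5
    h, r = random.randrange(p), random.randrange(p)
    assert poly1305_mul(h, r) % p == (h * r) % p
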
