
Lines Matching refs:u64

19 static __always_inline u64 u64_eq_mask(u64 a, u64 b)  in u64_eq_mask()
21 u64 x = a ^ b; in u64_eq_mask()
22 u64 minus_x = ~x + (u64)1U; in u64_eq_mask()
23 u64 x_or_minus_x = x | minus_x; in u64_eq_mask()
24 u64 xnx = x_or_minus_x >> (u32)63U; in u64_eq_mask()
25 u64 c = xnx - (u64)1U; in u64_eq_mask()
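
The lines above are the body of u64_eq_mask(), a branch-free 64-bit equality test that returns an all-ones or all-zeros mask. A minimal standalone sketch of the same construction, using uint64_t in place of the kernel's u64:

#include <stdint.h>

/* All-ones when a == b, all-zeros otherwise, with no data-dependent branch. */
static uint64_t u64_eq_mask(uint64_t a, uint64_t b)
{
	uint64_t x = a ^ b;                   /* zero exactly when a == b  */
	uint64_t minus_x = ~x + 1;            /* two's-complement negation */
	uint64_t x_or_minus_x = x | minus_x;  /* bit 63 set iff x != 0     */
	uint64_t xnx = x_or_minus_x >> 63;    /* 1 iff a != b, else 0      */

	return xnx - 1;                       /* 0 - 1 wraps to all-ones   */
}
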
29 static __always_inline u64 u64_gte_mask(u64 a, u64 b) in u64_gte_mask()
31 u64 x = a; in u64_gte_mask()
32 u64 y = b; in u64_gte_mask()
33 u64 x_xor_y = x ^ y; in u64_gte_mask()
34 u64 x_sub_y = x - y; in u64_gte_mask()
35 u64 x_sub_y_xor_y = x_sub_y ^ y; in u64_gte_mask()
36 u64 q = x_xor_y | x_sub_y_xor_y; in u64_gte_mask()
37 u64 x_xor_q = x ^ q; in u64_gte_mask()
38 u64 x_xor_q_ = x_xor_q >> (u32)63U; in u64_gte_mask()
39 u64 c = x_xor_q_ - (u64)1U; in u64_gte_mask()
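
u64_gte_mask() plays the same game for an unsigned greater-than-or-equal test: the borrow out of x - y is recovered from the top bit without branching on it. A standalone sketch under the same assumptions:

#include <stdint.h>

/* All-ones when a >= b, all-zeros otherwise, branch-free. */
static uint64_t u64_gte_mask(uint64_t a, uint64_t b)
{
	uint64_t x = a;
	uint64_t y = b;
	uint64_t x_xor_y = x ^ y;
	uint64_t x_sub_y = x - y;
	uint64_t x_sub_y_xor_y = x_sub_y ^ y;
	uint64_t q = x_xor_y | x_sub_y_xor_y;
	uint64_t x_xor_q = x ^ q;
	uint64_t x_xor_q_ = x_xor_q >> 63;    /* 1 iff a < b */

	return x_xor_q_ - 1;                  /* wraps to all-ones iff a >= b */
}
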
43 static __always_inline void modulo_carry_top(u64 *b) in modulo_carry_top()
45 u64 b4 = b[4]; in modulo_carry_top()
46 u64 b0 = b[0]; in modulo_carry_top()
47 u64 b4_ = b4 & 0x7ffffffffffffLLU; in modulo_carry_top()
48 u64 b0_ = b0 + 19 * (b4 >> 51); in modulo_carry_top()
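
modulo_carry_top() reveals the limb layout: five limbs in radix 2^51, reduced modulo p = 2^255 - 19, so anything carried out of the top limb re-enters the bottom limb multiplied by 19. A sketch reconstructed from the listed lines (the stores back into b[0]/b[4] are assumed):

#include <stdint.h>

/* Field elements are five radix-2^51 limbs; since 2^255 = 19 (mod p), the
 * bits above bit 51 of the top limb fold into limb 0 with a factor of 19. */
static void modulo_carry_top(uint64_t b[5])
{
	uint64_t b4 = b[4];
	uint64_t b0 = b[0];

	b[4] = b4 & 0x7ffffffffffffULL;   /* keep the low 51 bits */
	b[0] = b0 + 19 * (b4 >> 51);      /* fold the rest into limb 0 */
}
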
53 static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input) in fproduct_copy_from_wide_()
57 output[0] = ((u64)(xi)); in fproduct_copy_from_wide_()
61 output[1] = ((u64)(xi)); in fproduct_copy_from_wide_()
65 output[2] = ((u64)(xi)); in fproduct_copy_from_wide_()
69 output[3] = ((u64)(xi)); in fproduct_copy_from_wide_()
73 output[4] = ((u64)(xi)); in fproduct_copy_from_wide_()
78 fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s) in fproduct_sum_scalar_multiplication_()
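
Only the parameter list of fproduct_sum_scalar_multiplication_() matches here, but together with the wide copy above it outlines the multiply-accumulate core of the field multiplication: each 64-bit limb product is kept in a 128-bit accumulator. A loop-form sketch (the kernel version is unrolled, and u128 is taken to be unsigned __int128):

#include <stdint.h>

typedef unsigned __int128 u128;

/* output[i] += input[i] * s, with the products widened to 128 bits so no
 * precision is lost before the carry pass. */
static void fproduct_sum_scalar_multiplication_(u128 output[5],
						const uint64_t input[5],
						uint64_t s)
{
	for (int i = 0; i < 5; i++)
		output[i] += (u128)input[i] * s;
}
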
93 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
102 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
112 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
121 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
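
The four identical r0 lines belong to fproduct_carry_wide_(), one carry step per limb: keep the low 51 bits, push everything above into the next 128-bit accumulator. A sketch of a single step, with the surrounding loads and stores assumed and the helper name chosen here for illustration:

#include <stdint.h>

typedef unsigned __int128 u128;

/* One carry step in the wide accumulator: limb ctr keeps its low 51 bits,
 * the overflow moves into limb ctr + 1.  The listing shows this step
 * instantiated for ctr = 0..3. */
static void fproduct_carry_wide_step(u128 tmp[5], unsigned ctr)
{
	u128 tctr = tmp[ctr];
	u128 tctrp1 = tmp[ctr + 1];
	uint64_t r0 = (uint64_t)tctr & 0x7ffffffffffffULL;
	uint64_t c = (uint64_t)(tctr >> 51);

	tmp[ctr] = r0;
	tmp[ctr + 1] = tctrp1 + c;
}
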
128 static __always_inline void fmul_shift_reduce(u64 *output) in fmul_shift_reduce()
130 u64 tmp = output[4]; in fmul_shift_reduce()
131 u64 b0; in fmul_shift_reduce()
134 u64 z = output[ctr - 1]; in fmul_shift_reduce()
139 u64 z = output[ctr - 1]; in fmul_shift_reduce()
144 u64 z = output[ctr - 1]; in fmul_shift_reduce()
149 u64 z = output[ctr - 1]; in fmul_shift_reduce()
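
fmul_shift_reduce() multiplies the operand by 2^51 in place: the limbs rotate up one position and the limb that wraps past 2^255 picks up the factor 19. Rolled into a loop for readability (the listing shows the rotation unrolled, ctr = 4..1):

#include <stdint.h>

/* output := output * 2^51 (mod p): rotate limbs up, multiply the wrapped
 * top limb by 19 because 2^255 = 19 (mod p). */
static void fmul_shift_reduce(uint64_t output[5])
{
	uint64_t tmp = output[4];

	for (int ctr = 4; ctr > 0; ctr--)
		output[ctr] = output[ctr - 1];
	output[0] = 19 * tmp;
}
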
157 static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input, in fmul_mul_shift_reduce_()
158 u64 *input21) in fmul_mul_shift_reduce_()
161 u64 input2i; in fmul_mul_shift_reduce_()
163 u64 input2i = input21[0]; in fmul_mul_shift_reduce_()
168 u64 input2i = input21[1]; in fmul_mul_shift_reduce_()
173 u64 input2i = input21[2]; in fmul_mul_shift_reduce_()
178 u64 input2i = input21[3]; in fmul_mul_shift_reduce_()
187 static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21) in fmul_fmul()
189 u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] }; in fmul_fmul()
195 u64 i0; in fmul_fmul()
196 u64 i1; in fmul_fmul()
197 u64 i0_; in fmul_fmul()
198 u64 i1_; in fmul_fmul()
205 b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); in fmul_fmul()
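
Line 205 is the wide-domain version of the 19-fold: the same 2^255 = 19 reduction as modulo_carry_top(), applied to the 128-bit accumulators before they are narrowed back to 64-bit limbs. A sketch with a hypothetical helper name, assuming t[] holds the five u128 accumulators:

#include <stdint.h>

typedef unsigned __int128 u128;

/* Fold the bits above 2^255 back into limb 0 while the limbs are still in
 * their 128-bit accumulators. */
static void carry_top_wide(u128 t[5])
{
	u128 b4 = t[4];
	u128 b0 = t[0];

	t[4] = b4 & 0x7ffffffffffffULL;
	t[0] = b0 + (u128)19 * (uint64_t)(b4 >> 51);
}
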
218 static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output) in fsquare_fsquare__()
220 u64 r0 = output[0]; in fsquare_fsquare__()
221 u64 r1 = output[1]; in fsquare_fsquare__()
222 u64 r2 = output[2]; in fsquare_fsquare__()
223 u64 r3 = output[3]; in fsquare_fsquare__()
224 u64 r4 = output[4]; in fsquare_fsquare__()
225 u64 d0 = r0 * 2; in fsquare_fsquare__()
226 u64 d1 = r1 * 2; in fsquare_fsquare__()
227 u64 d2 = r2 * 2 * 19; in fsquare_fsquare__()
228 u64 d419 = r4 * 19; in fsquare_fsquare__()
229 u64 d4 = d419 * 2; in fsquare_fsquare__()
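
fsquare_fsquare__() exploits the symmetry of squaring: every cross product r_i * r_j with i != j appears twice, so the doubled limbs d0, d1, d2 and the pre-multiplied-by-19 top limb (d419, d4) let each term that wraps past 2^255 pick up its factor of 19 exactly once. The product lines themselves are not in the listing; the sketch below reconstructs them from the standard radix-2^51 squaring and should be read as an illustration, not verbatim kernel code:

#include <stdint.h>

typedef unsigned __int128 u128;

/* tmp := r^2 in the wide domain, before any carries. */
static void fsquare__(u128 tmp[5], const uint64_t r[5])
{
	uint64_t d0 = r[0] * 2;
	uint64_t d1 = r[1] * 2;
	uint64_t d2 = r[2] * 2 * 19;     /* doubled and pre-reduced */
	uint64_t d419 = r[4] * 19;
	uint64_t d4 = d419 * 2;

	tmp[0] = (u128)r[0] * r[0] + (u128)d4 * r[1] + (u128)d2 * r[3];
	tmp[1] = (u128)d0 * r[1] + (u128)d4 * r[2] + (u128)(r[3] * 19) * r[3];
	tmp[2] = (u128)d0 * r[2] + (u128)r[1] * r[1] + (u128)d4 * r[3];
	tmp[3] = (u128)d0 * r[3] + (u128)d1 * r[2] + (u128)r[4] * d419;
	tmp[4] = (u128)d0 * r[4] + (u128)d1 * r[3] + (u128)r[2] * r[2];
}
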
247 static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output) in fsquare_fsquare_()
253 u64 i0; in fsquare_fsquare_()
254 u64 i1; in fsquare_fsquare_()
255 u64 i0_; in fsquare_fsquare_()
256 u64 i1_; in fsquare_fsquare_()
262 b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); in fsquare_fsquare_()
274 static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp, in fsquare_fsquare_times_()
283 static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input, in fsquare_fsquare_times()
291 static __always_inline void fsquare_fsquare_times_inplace(u64 *output, in fsquare_fsquare_times_inplace()
298 static __always_inline void crecip_crecip(u64 *out, u64 *z) in crecip_crecip()
300 u64 buf[20] = { 0 }; in crecip_crecip()
301 u64 *a0 = buf; in crecip_crecip()
302 u64 *t00 = buf + 5; in crecip_crecip()
303 u64 *b0 = buf + 10; in crecip_crecip()
304 u64 *t01; in crecip_crecip()
305 u64 *b1; in crecip_crecip()
306 u64 *c0; in crecip_crecip()
307 u64 *a; in crecip_crecip()
308 u64 *t0; in crecip_crecip()
309 u64 *b; in crecip_crecip()
310 u64 *c; in crecip_crecip()
342 static __always_inline void fsum(u64 *a, u64 *b) in fsum()
351 static __always_inline void fdifference(u64 *a, u64 *b) in fdifference()
353 u64 tmp[5] = { 0 }; in fdifference()
354 u64 b0; in fdifference()
355 u64 b1; in fdifference()
356 u64 b2; in fdifference()
357 u64 b3; in fdifference()
358 u64 b4; in fdifference()
371 u64 xi = a[0]; in fdifference()
372 u64 yi = tmp[0]; in fdifference()
376 u64 xi = a[1]; in fdifference()
377 u64 yi = tmp[1]; in fdifference()
381 u64 xi = a[2]; in fdifference()
382 u64 yi = tmp[2]; in fdifference()
386 u64 xi = a[3]; in fdifference()
387 u64 yi = tmp[3]; in fdifference()
391 u64 xi = a[4]; in fdifference()
392 u64 yi = tmp[4]; in fdifference()
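
fdifference() computes a := b - a. To keep every limb subtraction from underflowing, a multiple of p is first added to the copy of b; the listing shows only the copy and the per-limb loads, so the constants below (8p spread across the limbs) come from the reference HACL*-style code and should be treated as an assumption:

#include <stdint.h>
#include <string.h>

/* a := b - a (mod p), with 8p added to b first so no limb underflows. */
static void fdifference(uint64_t a[5], const uint64_t b[5])
{
	uint64_t tmp[5];

	memcpy(tmp, b, sizeof(tmp));
	tmp[0] += 0x3fffffffffff68ULL;          /* 8 * (2^51 - 19) */
	for (int i = 1; i < 5; i++)
		tmp[i] += 0x3ffffffffffff8ULL;  /* 8 * (2^51 - 1)  */

	for (int i = 0; i < 5; i++) {
		uint64_t xi = a[i];
		uint64_t yi = tmp[i];

		a[i] = yi - xi;
	}
}
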
397 static __always_inline void fscalar(u64 *output, u64 *b, u64 s) in fscalar()
405 u64 xi = b[0]; in fscalar()
409 u64 xi = b[1]; in fscalar()
413 u64 xi = b[2]; in fscalar()
417 u64 xi = b[3]; in fscalar()
421 u64 xi = b[4]; in fscalar()
428 b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); in fscalar()
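
fscalar() multiplies a field element by a small constant (in the Montgomery formulas this is 121665). Each limb product is widened to 128 bits, then one carry pass and the usual 19-fold bring the result back to five roughly-51-bit limbs. A loop-form sketch under those assumptions:

#include <stdint.h>

typedef unsigned __int128 u128;

/* output := b * s (mod p); s is assumed small (e.g. 121665), so the final
 * fold fits comfortably in 64 bits. */
static void fscalar(uint64_t output[5], const uint64_t b[5], uint64_t s)
{
	u128 t[5];

	for (int i = 0; i < 5; i++)
		t[i] = (u128)b[i] * s;                   /* widening products */

	for (int i = 0; i < 4; i++) {                    /* carry pass */
		t[i + 1] += t[i] >> 51;
		t[i] = (uint64_t)t[i] & 0x7ffffffffffffULL;
	}

	/* fold the bits above 2^255 back into limb 0 (2^255 = 19 mod p) */
	t[0] = (uint64_t)t[0] + (u128)19 * (uint64_t)(t[4] >> 51);
	t[4] = (uint64_t)t[4] & 0x7ffffffffffffULL;

	for (int i = 0; i < 5; i++)
		output[i] = (uint64_t)t[i];
}
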
434 static __always_inline void fmul(u64 *output, u64 *a, u64 *b) in fmul()
439 static __always_inline void crecip(u64 *output, u64 *input) in crecip()
444 static __always_inline void point_swap_conditional_step(u64 *a, u64 *b, in point_swap_conditional_step()
445 u64 swap1, u32 ctr) in point_swap_conditional_step()
448 u64 ai = a[i]; in point_swap_conditional_step()
449 u64 bi = b[i]; in point_swap_conditional_step()
450 u64 x = swap1 & (ai ^ bi); in point_swap_conditional_step()
451 u64 ai1 = ai ^ x; in point_swap_conditional_step()
452 u64 bi1 = bi ^ x; in point_swap_conditional_step()
457 static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1) in point_swap_conditional5()
466 static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap) in point_swap_conditional()
468 u64 swap1 = 0 - iswap; in point_swap_conditional()
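
The conditional-swap trio keeps the Montgomery ladder constant-time: the ladder bit is stretched from 0/1 into a full 64-bit mask with 0 - iswap, then every limb pair of the two working points is swapped (or not) with the XOR-mask trick, so the memory accesses and instruction stream are identical either way. A self-contained sketch, with the points taken to be 10 limbs each (x and z, five limbs apiece):

#include <stdint.h>

/* Swap the two 10-limb points iff iswap == 1, without branching on it. */
static void point_swap_conditional(uint64_t a[10], uint64_t b[10], uint64_t iswap)
{
	uint64_t swap = 0 - iswap;              /* 0 -> all-zeros, 1 -> all-ones */

	for (unsigned i = 0; i < 10; i++) {
		uint64_t ai = a[i];
		uint64_t bi = b[i];
		uint64_t x = swap & (ai ^ bi);  /* zero when not swapping */

		a[i] = ai ^ x;
		b[i] = bi ^ x;
	}
}
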
473 static __always_inline void point_copy(u64 *output, u64 *input) in point_copy()
479 static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p, in addanddouble_fmonty()
480 u64 *pq, u64 *qmqp) in addanddouble_fmonty()
482 u64 *qx = qmqp; in addanddouble_fmonty()
483 u64 *x2 = pp; in addanddouble_fmonty()
484 u64 *z2 = pp + 5; in addanddouble_fmonty()
485 u64 *x3 = ppq; in addanddouble_fmonty()
486 u64 *z3 = ppq + 5; in addanddouble_fmonty()
487 u64 *x = p; in addanddouble_fmonty()
488 u64 *z = p + 5; in addanddouble_fmonty()
489 u64 *xprime = pq; in addanddouble_fmonty()
490 u64 *zprime = pq + 5; in addanddouble_fmonty()
491 u64 buf[40] = { 0 }; in addanddouble_fmonty()
492 u64 *origx = buf; in addanddouble_fmonty()
493 u64 *origxprime0 = buf + 5; in addanddouble_fmonty()
494 u64 *xxprime0; in addanddouble_fmonty()
495 u64 *zzprime0; in addanddouble_fmonty()
496 u64 *origxprime; in addanddouble_fmonty()
509 u64 *xx0; in addanddouble_fmonty()
510 u64 *zz0; in addanddouble_fmonty()
511 u64 *xxprime; in addanddouble_fmonty()
512 u64 *zzprime; in addanddouble_fmonty()
513 u64 *zzzprime; in addanddouble_fmonty()
528 u64 *zzz; in addanddouble_fmonty()
529 u64 *xx; in addanddouble_fmonty()
530 u64 *zz; in addanddouble_fmonty()
531 u64 scalar; in addanddouble_fmonty()
546 ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, in ladder_smallloop_cmult_small_loop_step()
547 u64 *q, u8 byt) in ladder_smallloop_cmult_small_loop_step()
549 u64 bit0 = (u64)(byt >> 7); in ladder_smallloop_cmult_small_loop_step()
550 u64 bit; in ladder_smallloop_cmult_small_loop_step()
553 bit = (u64)(byt >> 7); in ladder_smallloop_cmult_small_loop_step()
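
In the small ladder loop the next scalar bit is always read from the top of the byte: byt >> 7 yields 0 or 1, the byte is shifted left afterwards, and the bit drives a conditional swap before and after each Montgomery step. The sketch below is only the per-bit skeleton: the actual double-and-add arithmetic (addanddouble_fmonty) and the second swap on its output buffers are elided, and the one-bit-per-iteration loop stands in for the kernel's two-bit double step.

#include <stdint.h>

/* Process one scalar byte, most significant bit first. */
static void ladder_byte_sketch(uint8_t byt, uint64_t nq[10], uint64_t nqpq[10])
{
	for (int i = 0; i < 8; i++) {
		uint64_t bit = (uint64_t)(byt >> 7);    /* next scalar bit */
		uint64_t swap = 0 - bit;

		/* conditional swap, as in point_swap_conditional() */
		for (unsigned j = 0; j < 10; j++) {
			uint64_t x = swap & (nq[j] ^ nqpq[j]);

			nq[j] ^= x;
			nqpq[j] ^= x;
		}

		/* ...addanddouble_fmonty(): one ladder double-and-add step,
		 * followed by the mirroring swap on its outputs... */

		byt <<= 1;                              /* advance to the next bit */
	}
}
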
558 ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2, in ladder_smallloop_cmult_small_loop_double_step()
559 u64 *nqpq2, u64 *q, u8 byt) in ladder_smallloop_cmult_small_loop_double_step()
568 ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, in ladder_smallloop_cmult_small_loop()
569 u64 *q, u8 byt, u32 i) in ladder_smallloop_cmult_small_loop()
578 static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq, in ladder_bigloop_cmult_big_loop()
579 u64 *nqpq, u64 *nq2, in ladder_bigloop_cmult_big_loop()
580 u64 *nqpq2, u64 *q, in ladder_bigloop_cmult_big_loop()
590 static void ladder_cmult(u64 *result, u8 *n1, u64 *q) in ladder_cmult()
592 u64 point_buf[40] = { 0 }; in ladder_cmult()
593 u64 *nq = point_buf; in ladder_cmult()
594 u64 *nqpq = point_buf + 10; in ladder_cmult()
595 u64 *nq2 = point_buf + 20; in ladder_cmult()
596 u64 *nqpq2 = point_buf + 30; in ladder_cmult()
603 static __always_inline void format_fexpand(u64 *output, const u8 *input) in format_fexpand()
609 u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4; in format_fexpand()
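
format_fexpand() unpacks the 32 little-endian input bytes into five 51-bit limbs. The listing only shows the local declarations, so the sketch below reconstructs the usual unpacking (64-bit little-endian loads at byte offsets 0, 6, 12, 19 and 24, each shifted and masked to 51 bits); a portable byte loop stands in for the kernel's get_unaligned_le64():

#include <stdint.h>

static uint64_t load_le64(const uint8_t *p)
{
	uint64_t r = 0;

	for (int i = 7; i >= 0; i--)
		r = (r << 8) | p[i];
	return r;
}

/* Unpack a 32-byte little-endian value into five 51-bit limbs. */
static void format_fexpand(uint64_t output[5], const uint8_t input[32])
{
	const uint64_t mask = 0x7ffffffffffffULL;

	output[0] = load_le64(input)              & mask; /* bits   0..50  */
	output[1] = (load_le64(input + 6)  >> 3)  & mask; /* bits  51..101 */
	output[2] = (load_le64(input + 12) >> 6)  & mask; /* bits 102..152 */
	output[3] = (load_le64(input + 19) >> 1)  & mask; /* bits 153..203 */
	output[4] = (load_le64(input + 24) >> 12) & mask; /* bits 204..254 */
}
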
627 static __always_inline void format_fcontract_first_carry_pass(u64 *input) in format_fcontract_first_carry_pass()
629 u64 t0 = input[0]; in format_fcontract_first_carry_pass()
630 u64 t1 = input[1]; in format_fcontract_first_carry_pass()
631 u64 t2 = input[2]; in format_fcontract_first_carry_pass()
632 u64 t3 = input[3]; in format_fcontract_first_carry_pass()
633 u64 t4 = input[4]; in format_fcontract_first_carry_pass()
634 u64 t1_ = t1 + (t0 >> 51); in format_fcontract_first_carry_pass()
635 u64 t0_ = t0 & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
636 u64 t2_ = t2 + (t1_ >> 51); in format_fcontract_first_carry_pass()
637 u64 t1__ = t1_ & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
638 u64 t3_ = t3 + (t2_ >> 51); in format_fcontract_first_carry_pass()
639 u64 t2__ = t2_ & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
640 u64 t4_ = t4 + (t3_ >> 51); in format_fcontract_first_carry_pass()
641 u64 t3__ = t3_ & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
649 static __always_inline void format_fcontract_first_carry_full(u64 *input) in format_fcontract_first_carry_full()
655 static __always_inline void format_fcontract_second_carry_pass(u64 *input) in format_fcontract_second_carry_pass()
657 u64 t0 = input[0]; in format_fcontract_second_carry_pass()
658 u64 t1 = input[1]; in format_fcontract_second_carry_pass()
659 u64 t2 = input[2]; in format_fcontract_second_carry_pass()
660 u64 t3 = input[3]; in format_fcontract_second_carry_pass()
661 u64 t4 = input[4]; in format_fcontract_second_carry_pass()
662 u64 t1_ = t1 + (t0 >> 51); in format_fcontract_second_carry_pass()
663 u64 t0_ = t0 & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
664 u64 t2_ = t2 + (t1_ >> 51); in format_fcontract_second_carry_pass()
665 u64 t1__ = t1_ & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
666 u64 t3_ = t3 + (t2_ >> 51); in format_fcontract_second_carry_pass()
667 u64 t2__ = t2_ & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
668 u64 t4_ = t4 + (t3_ >> 51); in format_fcontract_second_carry_pass()
669 u64 t3__ = t3_ & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
677 static __always_inline void format_fcontract_second_carry_full(u64 *input) in format_fcontract_second_carry_full()
679 u64 i0; in format_fcontract_second_carry_full()
680 u64 i1; in format_fcontract_second_carry_full()
681 u64 i0_; in format_fcontract_second_carry_full()
682 u64 i1_; in format_fcontract_second_carry_full()
693 static __always_inline void format_fcontract_trim(u64 *input) in format_fcontract_trim()
695 u64 a0 = input[0]; in format_fcontract_trim()
696 u64 a1 = input[1]; in format_fcontract_trim()
697 u64 a2 = input[2]; in format_fcontract_trim()
698 u64 a3 = input[3]; in format_fcontract_trim()
699 u64 a4 = input[4]; in format_fcontract_trim()
700 u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU); in format_fcontract_trim()
701 u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU); in format_fcontract_trim()
702 u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU); in format_fcontract_trim()
703 u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU); in format_fcontract_trim()
704 u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU); in format_fcontract_trim()
705 u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4; in format_fcontract_trim()
706 u64 a0_ = a0 - (0x7ffffffffffedLLU & mask); in format_fcontract_trim()
707 u64 a1_ = a1 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
708 u64 a2_ = a2 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
709 u64 a3_ = a3 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
710 u64 a4_ = a4 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
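
format_fcontract_trim() performs the final conditional subtraction of p: the two mask helpers from the top of the file decide, in constant time, whether the limbs already hold a value >= p, and if so p is subtracted limb-wise under the mask. A self-contained sketch, with the mask helpers compressed from the earlier sketches:

#include <stdint.h>

static uint64_t u64_eq_mask(uint64_t a, uint64_t b)
{
	uint64_t x = a ^ b;

	return ((x | (~x + 1)) >> 63) - 1;      /* all-ones iff a == b */
}

static uint64_t u64_gte_mask(uint64_t a, uint64_t b)
{
	uint64_t q = (a ^ b) | ((a - b) ^ b);

	return ((a ^ q) >> 63) - 1;             /* all-ones iff a >= b */
}

/* Reduce to the canonical representative in [0, p), p = 2^255 - 19, in
 * constant time; the subtraction of p happens under a mask. */
static void format_fcontract_trim(uint64_t input[5])
{
	uint64_t mask0 = u64_gte_mask(input[0], 0x7ffffffffffedULL);
	uint64_t mask1 = u64_eq_mask(input[1], 0x7ffffffffffffULL);
	uint64_t mask2 = u64_eq_mask(input[2], 0x7ffffffffffffULL);
	uint64_t mask3 = u64_eq_mask(input[3], 0x7ffffffffffffULL);
	uint64_t mask4 = u64_eq_mask(input[4], 0x7ffffffffffffULL);
	uint64_t mask = mask0 & mask1 & mask2 & mask3 & mask4;

	input[0] -= 0x7ffffffffffedULL & mask;  /* p's limbs: 2^51 - 19, ...  */
	input[1] -= 0x7ffffffffffffULL & mask;  /* ... then 2^51 - 1 four times */
	input[2] -= 0x7ffffffffffffULL & mask;
	input[3] -= 0x7ffffffffffffULL & mask;
	input[4] -= 0x7ffffffffffffULL & mask;
}
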
718 static __always_inline void format_fcontract_store(u8 *output, u64 *input) in format_fcontract_store()
720 u64 t0 = input[0]; in format_fcontract_store()
721 u64 t1 = input[1]; in format_fcontract_store()
722 u64 t2 = input[2]; in format_fcontract_store()
723 u64 t3 = input[3]; in format_fcontract_store()
724 u64 t4 = input[4]; in format_fcontract_store()
725 u64 o0 = t1 << 51 | t0; in format_fcontract_store()
726 u64 o1 = t2 << 38 | t1 >> 13; in format_fcontract_store()
727 u64 o2 = t3 << 25 | t2 >> 26; in format_fcontract_store()
728 u64 o3 = t4 << 12 | t3 >> 39; in format_fcontract_store()
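
format_fcontract_store() repacks the five 51-bit limbs into four 64-bit words (o0..o3), each word stitching the top of one limb to the bottom of the next, and writes them out little-endian. The store lines are not in the listing, so the byte-wise write below is an assumption standing in for the kernel's put_unaligned_le64():

#include <stdint.h>

static void store_le64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

/* Pack five 51-bit limbs into the 32-byte little-endian output. */
static void format_fcontract_store(uint8_t output[32], const uint64_t t[5])
{
	uint64_t o0 = t[1] << 51 | t[0];         /* bits   0..63  */
	uint64_t o1 = t[2] << 38 | t[1] >> 13;   /* bits  64..127 */
	uint64_t o2 = t[3] << 25 | t[2] >> 26;   /* bits 128..191 */
	uint64_t o3 = t[4] << 12 | t[3] >> 39;   /* bits 192..255 */

	store_le64(output,      o0);
	store_le64(output + 8,  o1);
	store_le64(output + 16, o2);
	store_le64(output + 24, o3);
}
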
739 static __always_inline void format_fcontract(u8 *output, u64 *input) in format_fcontract()
747 static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point) in format_scalar_of_point()
749 u64 *x = point; in format_scalar_of_point()
750 u64 *z = point + 5; in format_scalar_of_point()
751 u64 buf[10] __aligned(32) = { 0 }; in format_scalar_of_point()
752 u64 *zmone = buf; in format_scalar_of_point()
753 u64 *sc = buf + 5; in format_scalar_of_point()
763 u64 buf0[10] __aligned(32) = { 0 }; in curve25519_generic()
764 u64 *x0 = buf0; in curve25519_generic()
765 u64 *z = buf0 + 5; in curve25519_generic()
766 u64 *q; in curve25519_generic()
777 u64 buf[15] = { 0 }; in curve25519_generic()
778 u64 *nq = buf; in curve25519_generic()
779 u64 *x = nq; in curve25519_generic()