
Lines Matching refs:tmp

614 u64 tmp[2 * ECC_MAX_DIGITS]; in vli_mmod_slow() local
615 u64 *v[2] = { tmp, product }; in vli_mmod_slow()
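
A note on line 615: "u64 *v[2] = { tmp, product }" sets up a double-buffering trick. The slow reduction repeatedly tries to subtract the (shifted) prime from the current value; the difference is always written into the buffer that is not currently active, and the active index only flips when the subtraction produced no borrow, so a failed trial subtraction never clobbers the running value. A minimal compilable sketch of one such step, with a helper name of my own rather than the kernel's vli_* routines:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: the candidate difference v[i] - mod is always written into
 * the spare buffer v[1 - i]; the active index flips only when no borrow
 * occurred, i.e. only when the subtraction was "valid".
 */
static void cond_sub_step(uint64_t *v[2], unsigned int *active,
                          const uint64_t *mod, unsigned int len)
{
        unsigned int i = *active, j;
        uint64_t borrow = 0;

        for (j = 0; j < len; j++) {
                uint64_t diff = v[i][j] - mod[j] - borrow;

                if (diff != v[i][j])
                        borrow = (diff > v[i][j]);
                v[1 - i][j] = diff;        /* always write the spare buffer */
        }
        *active = !(i ^ borrow);           /* swap only if there was no borrow */
}

int main(void)
{
        uint64_t a[1] = { 10 }, b[1] = { 0 }, mod[1] = { 7 };
        uint64_t *v[2] = { a, b };
        unsigned int active = 0;

        cond_sub_step(v, &active, mod, 1);  /* 10 - 7 = 3, no borrow: swap */
        cond_sub_step(v, &active, mod, 1);  /* 3 - 7 borrows: keep buffer  */
        printf("%llu\n", (unsigned long long)v[active][0]);   /* prints 3 */
        return 0;
}
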
687 const u64 *curve_prime, u64 *tmp) in vli_mmod_fast_192() argument
694 vli_set(tmp, &product[3], ndigits); in vli_mmod_fast_192()
695 carry = vli_add(result, result, tmp, ndigits); in vli_mmod_fast_192()
697 tmp[0] = 0; in vli_mmod_fast_192()
698 tmp[1] = product[3]; in vli_mmod_fast_192()
699 tmp[2] = product[4]; in vli_mmod_fast_192()
700 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_192()
702 tmp[0] = tmp[1] = product[5]; in vli_mmod_fast_192()
703 tmp[2] = 0; in vli_mmod_fast_192()
704 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_192()
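
For NIST P-192 the prime is p = 2^192 - 2^64 - 1, so 2^192 ≡ 2^64 + 1 (mod p) and the top three product words can be folded into the bottom three using additions only; the three tmp vectors at lines 694-704 are exactly that folding. A compilable sketch that rebuilds the same three addends (add3 is a stand-in of my own, not the kernel's vli_add):

#include <stdint.h>

/* 3-word little-endian add with carry out (stand-in for vli_add). */
static uint64_t add3(uint64_t *r, const uint64_t *a, const uint64_t *b)
{
        uint64_t carry = 0;
        int i;

        for (i = 0; i < 3; i++) {
                uint64_t sum = a[i] + b[i];
                uint64_t c = (sum < a[i]);

                r[i] = sum + carry;
                carry = c + (r[i] < sum);
        }
        return carry;
}

/*
 * product = c0..c5, the 64-bit words of the 384-bit product.  With
 * L = (c2,c1,c0) and H = (c5,c4,c3):
 *   product = L + H*2^192 ≡ L + H + H*2^64   (mod p192)
 * and the c5*2^192 piece of H*2^64 reduces once more to c5*(2^64 + 1).
 */
static uint64_t p192_fold(uint64_t result[3], const uint64_t product[6])
{
        uint64_t tmp[3];
        uint64_t carry;
        int i;

        for (i = 0; i < 3; i++)
                result[i] = product[i];          /* L = (c2,c1,c0) */

        tmp[0] = product[3];                     /* H = (c5,c4,c3) */
        tmp[1] = product[4];
        tmp[2] = product[5];
        carry = add3(result, result, tmp);

        tmp[0] = 0;                              /* (c4,c3,0) = c3*2^64 + c4*2^128 */
        tmp[1] = product[3];
        tmp[2] = product[4];
        carry += add3(result, result, tmp);

        tmp[0] = product[5];                     /* (0,c5,c5) = c5*(2^64 + 1) */
        tmp[1] = product[5];
        tmp[2] = 0;
        carry += add3(result, result, tmp);

        /* The kernel then subtracts p while (carry || result >= p); that
         * loop does not mention tmp, so it is absent from this listing. */
        return carry;
}
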
714 const u64 *curve_prime, u64 *tmp) in vli_mmod_fast_256() argument
723 tmp[0] = 0; in vli_mmod_fast_256()
724 tmp[1] = product[5] & 0xffffffff00000000ull; in vli_mmod_fast_256()
725 tmp[2] = product[6]; in vli_mmod_fast_256()
726 tmp[3] = product[7]; in vli_mmod_fast_256()
727 carry = vli_lshift(tmp, tmp, 1, ndigits); in vli_mmod_fast_256()
728 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_256()
731 tmp[1] = product[6] << 32; in vli_mmod_fast_256()
732 tmp[2] = (product[6] >> 32) | (product[7] << 32); in vli_mmod_fast_256()
733 tmp[3] = product[7] >> 32; in vli_mmod_fast_256()
734 carry += vli_lshift(tmp, tmp, 1, ndigits); in vli_mmod_fast_256()
735 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_256()
738 tmp[0] = product[4]; in vli_mmod_fast_256()
739 tmp[1] = product[5] & 0xffffffff; in vli_mmod_fast_256()
740 tmp[2] = 0; in vli_mmod_fast_256()
741 tmp[3] = product[7]; in vli_mmod_fast_256()
742 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_256()
745 tmp[0] = (product[4] >> 32) | (product[5] << 32); in vli_mmod_fast_256()
746 tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull); in vli_mmod_fast_256()
747 tmp[2] = product[7]; in vli_mmod_fast_256()
748 tmp[3] = (product[6] >> 32) | (product[4] << 32); in vli_mmod_fast_256()
749 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_256()
752 tmp[0] = (product[5] >> 32) | (product[6] << 32); in vli_mmod_fast_256()
753 tmp[1] = (product[6] >> 32); in vli_mmod_fast_256()
754 tmp[2] = 0; in vli_mmod_fast_256()
755 tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32); in vli_mmod_fast_256()
756 carry -= vli_sub(result, result, tmp, ndigits); in vli_mmod_fast_256()
759 tmp[0] = product[6]; in vli_mmod_fast_256()
760 tmp[1] = product[7]; in vli_mmod_fast_256()
761 tmp[2] = 0; in vli_mmod_fast_256()
762 tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull); in vli_mmod_fast_256()
763 carry -= vli_sub(result, result, tmp, ndigits); in vli_mmod_fast_256()
766 tmp[0] = (product[6] >> 32) | (product[7] << 32); in vli_mmod_fast_256()
767 tmp[1] = (product[7] >> 32) | (product[4] << 32); in vli_mmod_fast_256()
768 tmp[2] = (product[4] >> 32) | (product[5] << 32); in vli_mmod_fast_256()
769 tmp[3] = (product[6] << 32); in vli_mmod_fast_256()
770 carry -= vli_sub(result, result, tmp, ndigits); in vli_mmod_fast_256()
773 tmp[0] = product[7]; in vli_mmod_fast_256()
774 tmp[1] = product[4] & 0xffffffff00000000ull; in vli_mmod_fast_256()
775 tmp[2] = product[5]; in vli_mmod_fast_256()
776 tmp[3] = product[6] & 0xffffffff00000000ull; in vli_mmod_fast_256()
777 carry -= vli_sub(result, result, tmp, ndigits); in vli_mmod_fast_256()
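
The P-256 path follows the usual NIST decomposition of the 512-bit product into sixteen 32-bit limbs a0..a15 (a0 least significant, two limbs per u64) and computes t + 2*s1 + 2*s2 + s3 + s4 - d1 - d2 - d3 - d4 modulo p256: two doubled additions (the vli_lshift calls), two plain additions and four subtractions, which is exactly the sequence of vli_add/vli_sub calls above. Every masked or shifted expression is just two of those limbs packed into one 64-bit word; a small compilable illustration (limb32/pack64 are names of my own, not kernel helpers):

#include <assert.h>
#include <stdint.h>

/* The i-th 32-bit limb a_i of a value stored as little-endian u64 digits. */
static uint32_t limb32(const uint64_t *product, unsigned int i)
{
        return (uint32_t)(product[i / 2] >> ((i % 2) * 32));
}

/* Pack two 32-bit limbs as hi||lo into a single 64-bit word. */
static uint64_t pack64(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        /* Arbitrary sample product words c0..c7 (512 bits). */
        uint64_t p[8] = {
                0x0102030405060708ull, 0x1112131415161718ull,
                0x2122232425262728ull, 0x3132333435363738ull,
                0x4142434445464748ull, 0x5152535455565758ull,
                0x6162636465666768ull, 0x7172737475767778ull,
        };

        /* s1 = (a15,a14,a13,a12,a11,0,0,0): the words set at lines 723-726. */
        assert(pack64(limb32(p, 11), 0) == (p[5] & 0xffffffff00000000ull));
        assert(pack64(limb32(p, 13), limb32(p, 12)) == p[6]);
        assert(pack64(limb32(p, 15), limb32(p, 14)) == p[7]);

        /* First word of s4 at line 745 packs a10||a9. */
        assert(pack64(limb32(p, 10), limb32(p, 9)) ==
               ((p[4] >> 32) | (p[5] << 32)));

        return 0;
}
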
797 const u64 *curve_prime, u64 *tmp) in vli_mmod_fast_384() argument
806 tmp[0] = 0; // 0 || 0 in vli_mmod_fast_384()
807 tmp[1] = 0; // 0 || 0 in vli_mmod_fast_384()
808 tmp[2] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 in vli_mmod_fast_384()
809 tmp[3] = product[11]>>32; // 0 ||a23 in vli_mmod_fast_384()
810 tmp[4] = 0; // 0 || 0 in vli_mmod_fast_384()
811 tmp[5] = 0; // 0 || 0 in vli_mmod_fast_384()
812 carry = vli_lshift(tmp, tmp, 1, ndigits); in vli_mmod_fast_384()
813 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_384()
816 tmp[0] = product[6]; //a13||a12 in vli_mmod_fast_384()
817 tmp[1] = product[7]; //a15||a14 in vli_mmod_fast_384()
818 tmp[2] = product[8]; //a17||a16 in vli_mmod_fast_384()
819 tmp[3] = product[9]; //a19||a18 in vli_mmod_fast_384()
820 tmp[4] = product[10]; //a21||a20 in vli_mmod_fast_384()
821 tmp[5] = product[11]; //a23||a22 in vli_mmod_fast_384()
822 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_384()
825 tmp[0] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 in vli_mmod_fast_384()
826 tmp[1] = SL32OR32(product[6], (product[11]>>32)); //a12||a23 in vli_mmod_fast_384()
827 tmp[2] = SL32OR32(product[7], (product[6])>>32); //a14||a13 in vli_mmod_fast_384()
828 tmp[3] = SL32OR32(product[8], (product[7]>>32)); //a16||a15 in vli_mmod_fast_384()
829 tmp[4] = SL32OR32(product[9], (product[8]>>32)); //a18||a17 in vli_mmod_fast_384()
830 tmp[5] = SL32OR32(product[10], (product[9]>>32)); //a20||a19 in vli_mmod_fast_384()
831 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_384()
834 tmp[0] = AND64H(product[11]); //a23|| 0 in vli_mmod_fast_384()
835 tmp[1] = (product[10]<<32); //a20|| 0 in vli_mmod_fast_384()
836 tmp[2] = product[6]; //a13||a12 in vli_mmod_fast_384()
837 tmp[3] = product[7]; //a15||a14 in vli_mmod_fast_384()
838 tmp[4] = product[8]; //a17||a16 in vli_mmod_fast_384()
839 tmp[5] = product[9]; //a19||a18 in vli_mmod_fast_384()
840 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_384()
843 tmp[0] = 0; // 0|| 0 in vli_mmod_fast_384()
844 tmp[1] = 0; // 0|| 0 in vli_mmod_fast_384()
845 tmp[2] = product[10]; //a21||a20 in vli_mmod_fast_384()
846 tmp[3] = product[11]; //a23||a22 in vli_mmod_fast_384()
847 tmp[4] = 0; // 0|| 0 in vli_mmod_fast_384()
848 tmp[5] = 0; // 0|| 0 in vli_mmod_fast_384()
849 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_384()
852 tmp[0] = AND64L(product[10]); // 0 ||a20 in vli_mmod_fast_384()
853 tmp[1] = AND64H(product[10]); //a21|| 0 in vli_mmod_fast_384()
854 tmp[2] = product[11]; //a23||a22 in vli_mmod_fast_384()
855 tmp[3] = 0; // 0 || 0 in vli_mmod_fast_384()
856 tmp[4] = 0; // 0 || 0 in vli_mmod_fast_384()
857 tmp[5] = 0; // 0 || 0 in vli_mmod_fast_384()
858 carry += vli_add(result, result, tmp, ndigits); in vli_mmod_fast_384()
861 tmp[0] = SL32OR32(product[6], (product[11]>>32)); //a12||a23 in vli_mmod_fast_384()
862 tmp[1] = SL32OR32(product[7], (product[6]>>32)); //a14||a13 in vli_mmod_fast_384()
863 tmp[2] = SL32OR32(product[8], (product[7]>>32)); //a16||a15 in vli_mmod_fast_384()
864 tmp[3] = SL32OR32(product[9], (product[8]>>32)); //a18||a17 in vli_mmod_fast_384()
865 tmp[4] = SL32OR32(product[10], (product[9]>>32)); //a20||a19 in vli_mmod_fast_384()
866 tmp[5] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 in vli_mmod_fast_384()
867 carry -= vli_sub(result, result, tmp, ndigits); in vli_mmod_fast_384()
870 tmp[0] = (product[10]<<32); //a20|| 0 in vli_mmod_fast_384()
871 tmp[1] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 in vli_mmod_fast_384()
872 tmp[2] = (product[11]>>32); // 0 ||a23 in vli_mmod_fast_384()
873 tmp[3] = 0; // 0 || 0 in vli_mmod_fast_384()
874 tmp[4] = 0; // 0 || 0 in vli_mmod_fast_384()
875 tmp[5] = 0; // 0 || 0 in vli_mmod_fast_384()
876 carry -= vli_sub(result, result, tmp, ndigits); in vli_mmod_fast_384()
879 tmp[0] = 0; // 0 || 0 in vli_mmod_fast_384()
880 tmp[1] = AND64H(product[11]); //a23|| 0 in vli_mmod_fast_384()
881 tmp[2] = product[11]>>32; // 0 ||a23 in vli_mmod_fast_384()
882 tmp[3] = 0; // 0 || 0 in vli_mmod_fast_384()
883 tmp[4] = 0; // 0 || 0 in vli_mmod_fast_384()
884 tmp[5] = 0; // 0 || 0 in vli_mmod_fast_384()
885 carry -= vli_sub(result, result, tmp, ndigits); in vli_mmod_fast_384()
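
The P-384 product is 768 bits, i.e. twenty-four 32-bit limbs a0..a23, and p384 = 2^384 - 2^128 - 2^96 + 2^32 - 1; the //aXX||aYY comments name the two limbs packed into each 64-bit word. SL32OR32, AND64H and AND64L are small pack/mask helpers; the definitions below are a sketch consistent with the listed usage (check the source file for the authoritative ones):

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

/* Pack: the low 32 bits of x32 become the high half, y32 (< 2^32) the low half. */
#define SL32OR32(x32, y32) (((u64)(x32) << 32) | (y32))
/* Keep only the high / low 32 bits of a 64-bit word. */
#define AND64H(x64) ((x64) & 0xffffffff00000000ull)
#define AND64L(x64) ((x64) & 0x00000000ffffffffull)

int main(void)
{
        /* Sample words: product[10] = a21||a20, product[11] = a23||a22. */
        u64 p10 = 0x0000001500000014ull;    /* a21 = 0x15, a20 = 0x14 */
        u64 p11 = 0x0000001700000016ull;    /* a23 = 0x17, a22 = 0x16 */

        /* Line 808: SL32OR32(product[11], product[10] >> 32) is a22||a21. */
        assert(SL32OR32(p11, p10 >> 32) == 0x0000001600000015ull);

        /* Line 834: AND64H(product[11]) keeps a23 in the high half (a23||0). */
        assert(AND64H(p11) == 0x0000001700000000ull);

        /* Line 852: AND64L(product[10]) keeps a20 in the low half (0||a20). */
        assert(AND64L(p10) == 0x0000000000000014ull);

        return 0;
}
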
910 u64 tmp[2 * ECC_MAX_DIGITS]; in vli_mmod_fast() local
933 vli_mmod_fast_192(result, product, curve_prime, tmp); in vli_mmod_fast()
936 vli_mmod_fast_256(result, product, curve_prime, tmp); in vli_mmod_fast()
939 vli_mmod_fast_384(result, product, curve_prime, tmp); in vli_mmod_fast()
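
vli_mmod_fast() itself supplies the shared scratch buffer (the tmp[2 * ECC_MAX_DIGITS] local at line 910) and dispatches on the curve's digit count: 3 words for P-192, 4 for P-256, 6 for P-384, matching the call sites at lines 933, 936 and 939. A sketch of that dispatch shape, simplified and hypothetical: the real function has additional handling between the tmp declaration at 910 and these calls, and ECC_MAX_DIGITS comes from the ecc headers, so the value below is only illustrative.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

#define ECC_MAX_DIGITS 8        /* illustrative; the real value lives in the ecc headers */

/* Prototypes as they appear in the listing (lines 687, 714, 797). */
void vli_mmod_fast_192(u64 *result, const u64 *product,
                       const u64 *curve_prime, u64 *tmp);
void vli_mmod_fast_256(u64 *result, const u64 *product,
                       const u64 *curve_prime, u64 *tmp);
void vli_mmod_fast_384(u64 *result, const u64 *product,
                       const u64 *curve_prime, u64 *tmp);

/* One stack scratch buffer, sized for the double-width product, shared by
 * every fast-reduction routine; the digit count picks the curve. */
bool mmod_fast_dispatch(u64 *result, u64 *product,
                        const u64 *curve_prime, unsigned int ndigits)
{
        u64 tmp[2 * ECC_MAX_DIGITS];

        switch (ndigits) {
        case 3:
                vli_mmod_fast_192(result, product, curve_prime, tmp);
                break;
        case 4:
                vli_mmod_fast_256(result, product, curve_prime, tmp);
                break;
        case 6:
                vli_mmod_fast_384(result, product, curve_prime, tmp);
                break;
        default:
                return false;   /* unsupported digit count */
        }
        return true;
}
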