
Lines Matching +full:rust +full:- +full:embedded

1 //! A simple big-integer type for slow path algorithms.
3 //! This includes a minimal stack vector for use in big-integer arithmetic.
20 /// ≅ 3600 for base-10, rounded up.
63 pub fn new() -> Self { in new()
71 pub fn from_u64(value: u64) -> Self { in from_u64()
78 pub fn hi64(&self) -> (u64, bool) { in hi64()
84 pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> { in pow()
95 /// Calculate the bit-length of the big-integer.
97 pub fn bit_length(&self) -> u32 { in bit_length()
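For orientation, the following is a minimal sketch of what such a stack-allocated big integer could look like; the `Limb = u64` alias and the `BIGINT_LIMBS` capacity are illustrative assumptions, not the crate's actual definitions.

    // Hypothetical sketch, not the crate's real type: a big integer
    // stored as a fixed-capacity array of little-endian limbs.
    type Limb = u64;
    const LIMB_BITS: usize = 64;
    // Headroom above the ~3600-bit worst case noted above.
    const BIGINT_LIMBS: usize = 4000 / LIMB_BITS;

    pub struct Bigint {
        data: [Limb; BIGINT_LIMBS], // limbs, least-significant first
        length: usize,              // number of limbs currently in use
    }

    impl Bigint {
        pub fn new() -> Self {
            Bigint { data: [0; BIGINT_LIMBS], length: 0 }
        }

        pub fn from_u64(value: u64) -> Self {
            let mut x = Bigint::new();
            if value != 0 {
                x.data[0] = value;
                x.length = 1;
            }
            x
        }
    }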
119 fn index(&self, index: usize) -> &T { in index()
121 &(*self.inner)[len - index - 1] in index()
127 pub fn rview(x: &[Limb]) -> ReverseView<Limb> { in rview()
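A sketch of the reverse view over a plain slice: index 0 maps to the most-significant limb, so big-endian style loops over the little-endian buffer read naturally. The generic shape here is an assumption based on the excerpt.

    pub struct ReverseView<'a, T> {
        inner: &'a [T],
    }

    impl<'a, T> core::ops::Index<usize> for ReverseView<'a, T> {
        type Output = T;

        fn index(&self, index: usize) -> &T {
            // Mirror the index: 0 becomes the last (highest) limb.
            let len = self.inner.len();
            &self.inner[len - index - 1]
        }
    }

    pub fn rview<T>(x: &[T]) -> ReverseView<'_, T> {
        ReverseView { inner: x }
    }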
134 // -------
136 /// Compare `x` to `y`, in little-endian order.
138 pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering { in compare()
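A sketch of the comparison, assuming 64-bit limbs and normalized inputs (no trailing zero limbs): a longer buffer is strictly larger, and equal-length buffers are compared limb by limb from the most-significant end.

    use core::cmp;

    pub fn compare(x: &[u64], y: &[u64]) -> cmp::Ordering {
        match x.len().cmp(&y.len()) {
            cmp::Ordering::Equal => {
                // Same limb count: walk down from the high limbs.
                for (xi, yi) in x.iter().rev().zip(y.iter().rev()) {
                    match xi.cmp(yi) {
                        cmp::Ordering::Equal => continue,
                        ord => return ord,
                    }
                }
                cmp::Ordering::Equal
            },
            ord => ord,
        }
    }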
156 // ---------
161 // We don't care if this wraps: the index is bounds-checked. in normalize()
164 unsafe { x.set_len(x.len() - 1) }; in normalize()
174 pub fn is_normalized(x: &[Limb]) -> bool { in is_normalized()
175 // We don't care if this wraps: the index is bounds-checked. in is_normalized()
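A safe sketch of both helpers; `Vec<u64>` stands in for the crate's stack vector, so the unchecked `set_len` from the excerpt becomes a plain `pop`.

    // Strip most-significant zero limbs, so that buffer length alone
    // orders two values with different limb counts.
    pub fn normalize(x: &mut Vec<u64>) {
        while x.last() == Some(&0) {
            x.pop();
        }
    }

    pub fn is_normalized(x: &[u64]) -> bool {
        x.last() != Some(&0)
    }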
183 // ----
188 pub fn from_u64(x: u64) -> VecType { in from_u64()
202 // --
204 /// Check if any of the remaining bits are non-zero.
210 pub fn nonzero(x: &[Limb], rindex: usize) -> bool { in nonzero()
214 let slc = &x[..len - rindex]; in nonzero()
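A sketch of the check, assuming 64-bit limbs: it asks whether anything below the top `rindex` limbs is non-zero, that is, whether truncating to the high limbs lost information.

    pub fn nonzero(x: &[u64], rindex: usize) -> bool {
        // Everything below the top `rindex` limbs.
        let len = x.len();
        x[..len - rindex].iter().any(|&limb| limb != 0)
    }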
220 /// Shift 32-bit integer to high 64-bits.
222 pub fn u32_to_hi64_1(r0: u32) -> (u64, bool) { in u32_to_hi64_1()
226 /// Shift 2 32-bit integers to high 64-bits.
228 pub fn u32_to_hi64_2(r0: u32, r1: u32) -> (u64, bool) { in u32_to_hi64_2()
234 /// Shift 3 32-bit integers to high 64-bits.
236 pub fn u32_to_hi64_3(r0: u32, r1: u32, r2: u32) -> (u64, bool) { in u32_to_hi64_3()
243 /// Shift 64-bit integer to high 64-bits.
245 pub fn u64_to_hi64_1(r0: u64) -> (u64, bool) { in u64_to_hi64_1()
250 /// Shift 2 64-bit integers to high 64-bits.
252 pub fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) { in u64_to_hi64_2()
254 let rs = 64 - ls; in u64_to_hi64_2()
310 pub fn hi64(x: &[Limb]) -> (u64, bool) { in hi64()
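A sketch of the 64-bit-limb path, assuming a normalized buffer: shift the top limb until its most-significant bit is set, pull in bits from the limb below, and report whether any discarded bits were non-zero. The `u32_to_hi64_*` variants above do the same by first combining 32-bit limbs.

    // Assumes r0 != 0, i.e. the high limb of a normalized value.
    pub fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
        let ls = r0.leading_zeros();
        let v = match ls {
            // r1 >> 64 would overflow, so handle a full r0 specially.
            0 => r0,
            _ => (r0 << ls) | (r1 >> (64 - ls)),
        };
        (v, r1 << ls != 0)
    }

    pub fn hi64(x: &[u64]) -> (u64, bool) {
        match x {
            [] => (0, false),
            [r0] => (*r0 << r0.leading_zeros(), false),
            [rest @ .., r1, r0] => {
                let (v, truncated) = u64_to_hi64_2(*r0, *r1);
                // Also truncated if any lower limb is non-zero.
                (v, truncated || rest.iter().any(|&limb| limb != 0))
            },
        }
    }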
325 // ------
333 /// and `O(n*m)` for the result. Since `m` is typically a lower-order
343 /// test bigcomp_f32_lexical ... bench: 1,018 ns/iter (+/- 78)
344 /// test bigcomp_f64_lexical ... bench: 3,639 ns/iter (+/- 1,007)
348 /// test bigcomp_f32_lexical ... bench: 518 ns/iter (+/- 31)
349 /// test bigcomp_f64_lexical ... bench: 583 ns/iter (+/- 47)
353 /// test bigcomp_f32_lexical ... bench: 671 ns/iter (+/- 31)
354 /// test bigcomp_f64_lexical ... bench: 1,394 ns/iter (+/- 47)
357 /// a version with only small powers, and one with pre-computed powers
366 /// Even using worst-case scenarios, exponentiation by squaring is
373 pub fn pow(x: &mut VecType, mut exp: u32) -> Option<()> { in pow()
380 exp -= LARGE_POW5_STEP; in pow()
384 // Now use our pre-computed small powers iteratively. in pow()
385 // This is calculated as `⌊log₅(2^BITS - 1)⌋`. in pow()
394 exp -= small_step; in pow()
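A sketch of the loop structure described above, assuming 64-bit limbs. The real code first multiplies by a pre-computed multi-limb power while `exp >= LARGE_POW5_STEP`, then finishes with powers that fit in a single limb; the large-power table is elided here, and `small_mul` is a local stand-in so the sketch compiles alone.

    // ⌊log₅(2^64 - 1)⌋: the largest power of 5 in one u64 limb.
    const SMALL_STEP: u32 = 27;

    pub fn pow5(x: &mut Vec<u64>, mut exp: u32) -> Option<()> {
        // (Loop over the pre-computed large power elided.)
        while exp > 0 {
            let step = exp.min(SMALL_STEP);
            small_mul(x, 5u64.pow(step))?;
            exp -= step;
        }
        Some(())
    }

    // Minimal scalar multiply so the sketch stands alone; see the
    // small-ops section below for the same idea.
    fn small_mul(x: &mut Vec<u64>, y: u64) -> Option<()> {
        let mut carry: u64 = 0;
        for xi in x.iter_mut() {
            let wide = (*xi as u128) * (y as u128) + (carry as u128);
            *xi = wide as u64;
            carry = (wide >> 64) as u64;
        }
        if carry != 0 {
            x.push(carry);
        }
        Some(())
    }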
405 // ------
409 pub fn scalar_add(x: Limb, y: Limb) -> (Limb, bool) { in scalar_add()
417 pub fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) { in scalar_mul()
420 // `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX` in scalar_mul()
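Sketches of the two scalar primitives, assuming 64-bit limbs with `u128` as the wide type; the excerpt's inequality `u128::MAX - u64::MAX * u64::MAX >= u64::MAX` is exactly why the multiply-add below cannot overflow.

    pub fn scalar_add(x: u64, y: u64) -> (u64, bool) {
        x.overflowing_add(y)
    }

    pub fn scalar_mul(x: u64, y: u64, carry: u64) -> (u64, u64) {
        // x*y + carry always fits in u128; split into (low, high).
        let wide = (x as u128) * (y as u128) + (carry as u128);
        (wide as u64, (wide >> 64) as u64)
    }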
426 // -----
430 pub fn small_add_from(x: &mut VecType, y: Limb, start: usize) -> Option<()> { in small_add_from()
448 pub fn small_add(x: &mut VecType, y: Limb) -> Option<()> { in small_add()
454 pub fn small_mul(x: &mut VecType, y: Limb) -> Option<()> { in small_mul()
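Sketches of the single-limb operations, assuming 64-bit limbs; `Vec<u64>` stands in for the crate's bounded stack vector, which is why the real functions return `Option<()>`: a push past capacity must be able to fail.

    pub fn small_add(x: &mut Vec<u64>, y: u64) -> Option<()> {
        let mut carry = y;
        for xi in x.iter_mut() {
            if carry == 0 {
                return Some(());
            }
            let (sum, overflow) = xi.overflowing_add(carry);
            *xi = sum;
            carry = overflow as u64;
        }
        if carry != 0 {
            x.push(carry);
        }
        Some(())
    }

    pub fn small_mul(x: &mut Vec<u64>, y: u64) -> Option<()> {
        let mut carry: u64 = 0;
        for xi in x.iter_mut() {
            // Widening multiply-add: cannot overflow u128.
            let wide = (*xi as u128) * (y as u128) + (carry as u128);
            *xi = wide as u64;
            carry = (wide >> 64) as u64;
        }
        if carry != 0 {
            x.push(carry);
        }
        Some(())
    }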
469 // -----
472 pub fn large_add_from(x: &mut VecType, y: &[Limb], start: usize) -> Option<()> { in large_add_from()
511 pub fn large_add(x: &mut VecType, y: &[Limb]) -> Option<()> { in large_add()
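A sketch of offset addition, assuming 64-bit limbs: add `y` into `x` starting at limb `start` and ripple the carry upward. The offset form is what makes the multiplication below cheap, since a shifted addend needs no copying.

    pub fn large_add_from(x: &mut Vec<u64>, y: &[u64], start: usize) -> Option<()> {
        // Ensure x can absorb every limb of y.
        if x.len() < start + y.len() {
            x.resize(start + y.len(), 0);
        }
        let mut carry = 0u64;
        for (i, &yi) in y.iter().enumerate() {
            let sum = (x[start + i] as u128) + (yi as u128) + (carry as u128);
            x[start + i] = sum as u64;
            carry = (sum >> 64) as u64;
        }
        // Ripple any remaining carry through the upper limbs.
        let mut i = start + y.len();
        while carry != 0 {
            if i == x.len() {
                x.push(0);
            }
            let sum = (x[i] as u128) + (carry as u128);
            x[i] = sum as u64;
            carry = (sum >> 64) as u64;
            i += 1;
        }
        Some(())
    }

    pub fn large_add(x: &mut Vec<u64>, y: &[u64]) -> Option<()> {
        large_add_from(x, y, 0)
    }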
515 /// Grade-school multiplication algorithm.
517 /// Slow, naive algorithm, using limb-bit bases and just shifting left for
521 /// `n` multiplications, and `n` additions, or grade-school multiplication.
576 /// In short, Karatsuba multiplication is never worthwhile for our use-case.
577 pub fn long_mul(x: &[Limb], y: &[Limb]) -> Option<VecType> { in long_mul()
600 /// Multiply bigint by bigint using grade-school multiplication algorithm.
602 pub fn large_mul(x: &mut VecType, y: &[Limb]) -> Option<()> { in large_mul()
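A sketch of the grade-school product, assuming 64-bit limbs: each limb of `y` contributes `x * y[i]` accumulated at offset `i`, giving the `O(n*m)` limb multiplications discussed above.

    pub fn long_mul(x: &[u64], y: &[u64]) -> Vec<u64> {
        let mut z = vec![0u64; x.len() + y.len()];
        for (i, &yi) in y.iter().enumerate() {
            let mut carry: u64 = 0;
            for (j, &xj) in x.iter().enumerate() {
                // Multiply-add into the running result; a full product
                // plus a limb plus a carry still fits in u128.
                let wide = (xj as u128) * (yi as u128)
                    + (z[i + j] as u128)
                    + (carry as u128);
                z[i + j] = wide as u64;
                carry = (wide >> 64) as u64;
            }
            z[i + x.len()] = carry;
        }
        // Normalize: strip most-significant zero limbs.
        while z.last() == Some(&0) {
            z.pop();
        }
        z
    }

    pub fn large_mul(x: &mut Vec<u64>, y: &[u64]) -> Option<()> {
        *x = long_mul(x, y);
        Some(())
    }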
615 // -----
617 /// Shift-left `n` bits inside a buffer.
619 pub fn shl_bits(x: &mut VecType, n: usize) -> Option<()> { in shl_bits()
623 // right shifted limb-bits. in shl_bits()
628 let rshift = LIMB_BITS - n; in shl_bits()
638 // Always push the carry, even if it creates a non-normal result. in shl_bits()
647 /// Shift-left `n` limbs inside a buffer.
649 pub fn shl_limbs(x: &mut VecType, n: usize) -> Option<()> { in shl_limbs()
671 /// Shift-left buffer by n bits.
673 pub fn shl(x: &mut VecType, n: usize) -> Option<()> { in shl()
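A sketch of the shift, assuming 64-bit limbs: split `n` into whole-limb and sub-limb parts; for the sub-limb part, bits shifted out the top of one limb carry into the bottom of the next.

    const LIMB_BITS: usize = 64;

    pub fn shl_bits(x: &mut Vec<u64>, n: usize) -> Option<()> {
        debug_assert!(n != 0 && n < LIMB_BITS);
        let rshift = LIMB_BITS - n;
        let mut prev: u64 = 0;
        for xi in x.iter_mut() {
            let tmp = *xi;
            // Low bits come from the previous limb's spilled high bits.
            *xi = (*xi << n) | (prev >> rshift);
            prev = tmp;
        }
        // Push a new limb if any bits spilled out the top.
        let carry = prev >> rshift;
        if carry != 0 {
            x.push(carry);
        }
        Some(())
    }

    pub fn shl_limbs(x: &mut Vec<u64>, n: usize) -> Option<()> {
        // Whole-limb shift: insert n zero limbs at the low end.
        if !x.is_empty() {
            let mut shifted = vec![0u64; n];
            shifted.extend_from_slice(x);
            *x = shifted;
        }
        Some(())
    }

    pub fn shl(x: &mut Vec<u64>, n: usize) -> Option<()> {
        let (limbs, bits) = (n / LIMB_BITS, n % LIMB_BITS);
        if bits != 0 {
            shl_bits(x, bits)?;
        }
        if limbs != 0 {
            shl_limbs(x, limbs)?;
        }
        Some(())
    }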
687 pub fn leading_zeros(x: &[Limb]) -> u32 { in leading_zeros()
697 /// Calculate the bit-length of the big-integer.
699 pub fn bit_length(x: &[Limb]) -> u32 { in bit_length()
701 LIMB_BITS as u32 * x.len() as u32 - nlz in bit_length()
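A sketch, assuming 64-bit limbs: the bit length is the total limb bits minus the leading zeros of the most-significant limb.

    pub fn leading_zeros(x: &[u64]) -> u32 {
        // Leading zeros of the most-significant limb; 0 if empty.
        x.last().map_or(0, |hi| hi.leading_zeros())
    }

    pub fn bit_length(x: &[u64]) -> u32 {
        64 * x.len() as u32 - leading_zeros(x)
    }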
705 // ----
709 // A limb is analogous to a digit in base10, except, it stores 32-bit
710 // or 64-bit numbers instead. We want types where 64-bit multiplication
711 // is well-supported by the architecture, rather than emulated in 3
713 // cross-compiler for numerous architectures, along with the following
716 // Compile with `gcc main.c -c -S -O3 -masm=intel`
740 // This should be all known 64-bit platforms supported by Rust.
741 // https://forge.rust-lang.org/platform-support.html
745 // Platforms where native 128-bit multiplication is explicitly supported:
746 // - x86_64 (Supported via `MUL`).
747 // - mips64 (Supported via `DMULTU`, whose `HI` and `LO` registers can be read from).
748 // - s390x (Supported via `MLGR`).
752 // Platforms where native 64-bit multiplication is supported and
753 // you can extract hi-lo for 64-bit multiplications.
754 // - aarch64 (Requires `UMULH` and `MUL` to capture high and low bits).
755 // - powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits).
756 // - riscv64 (Requires `MUL` and `MULH` to capture high and low bits).
760 // Platforms where native 128-bit multiplication is not supported,
762 // - sparc64 (`UMUL` only supports double-word arguments).
765 // These tests are run via `xcross`, my own library for C cross-compiling,
766 // which supports numerous targets (far in excess of Rust's tier 1 support,
767 // or rust-embedded/cross's list). xcross may be found here:
771 // `xcross gcc main.c -c -S -O3 --target $target`
773 // All 32-bit architectures inherently lack native 128-bit multiplication. That means
774 // we can essentially look for 64-bit architectures that are not SPARC.
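Given that survey, the limb width could be chosen with a `cfg` gate along these lines; the target list here is illustrative, assembled from the platforms named above, not an authoritative set.

    // Hypothetical sketch: 64-bit limbs where fast 128-bit (or hi/lo
    // 64-bit) multiplication exists, 32-bit limbs everywhere else.
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "mips64",
        target_arch = "powerpc64",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "x86_64",
    ))]
    pub type Limb = u64;

    #[cfg(not(any(
        target_arch = "aarch64",
        target_arch = "mips64",
        target_arch = "powerpc64",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "x86_64",
    )))]
    pub type Limb = u32;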