From 14d83381e5f47b1bccf52fe31a8a81fda478648f Mon Sep 17 00:00:00 2001 From: ltdk Date: Fri, 11 Oct 2024 15:46:38 -0400 Subject: [PATCH] Add intrinsics for bigint helper methods --- .../rustc_codegen_gcc/src/intrinsic/mod.rs | 6 + compiler/rustc_codegen_llvm/src/intrinsic.rs | 77 ++++++- .../src/interpret/intrinsics.rs | 128 +++++++++++ .../rustc_const_eval/src/interpret/place.rs | 11 + compiler/rustc_const_eval/src/lib.rs | 1 + .../rustc_hir_analysis/src/check/intrinsic.rs | 27 +++ compiler/rustc_span/src/symbol.rs | 5 + library/core/src/intrinsics.rs | 74 +++++++ library/core/src/num/int_macros.rs | 144 +++++++++++- library/core/src/num/mod.rs | 170 +++------------ library/core/src/num/uint_macros.rs | 205 +++++++++++++++++- library/core/tests/num/i128.rs | 2 +- library/core/tests/num/i16.rs | 2 +- library/core/tests/num/i32.rs | 2 +- library/core/tests/num/i64.rs | 2 +- library/core/tests/num/i8.rs | 2 +- library/core/tests/num/int_macros.rs | 62 +++++- library/core/tests/num/uint_macros.rs | 21 ++ 18 files changed, 777 insertions(+), 164 deletions(-) diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 945eedf555667..58f87164a82d2 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -192,6 +192,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc | sym::prefetch_write_instruction => { unimplemented!(); } + #[cfg(not(bootstrap))] + sym::add_with_carry + | sym::sub_with_carry + | sym::mul_double + | sym::mul_double_add + | sym::mul_double_add2 => unimplemented!(), sym::ctlz | sym::ctlz_nonzero | sym::cttz diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 30c6f08e894b7..918f65d381dfd 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -347,7 +347,12 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { | sym::rotate_left | sym::rotate_right | sym::saturating_add - | sym::saturating_sub => { + | sym::saturating_sub + | sym::add_with_carry + | sym::sub_with_carry + | sym::mul_double + | sym::mul_double_add + | sym::mul_double_add2 => { let ty = arg_tys[0]; match int_type_width_signed(ty, self) { Some((width, signed)) => match name { @@ -417,6 +422,76 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { ); self.call_intrinsic(llvm_name, &[lhs, rhs]) } + sym::add_with_carry | sym::sub_with_carry => { + let llty = self.type_ix(width); + let is_add = name == sym::add_with_carry; + let lhs = args[0].immediate(); + let rhs = args[1].immediate(); + + // sign-extending the carry would treat it as -1, not 1 + let carry = self.intcast(args[2].immediate(), llty, false); + + let llvm_name = &format!( + "llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + if is_add { "add" } else { "sub" }, + width, + ); + + let ret = self.call_intrinsic(llvm_name, &[lhs, rhs]); + let agg = self.extract_value(ret, 0); + let overflow1 = self.extract_value(ret, 1); + + let ret = self.call_intrinsic(llvm_name, &[agg, carry]); + let agg = self.extract_value(ret, 0); + let overflow2 = self.extract_value(ret, 1); + + let overflow = if signed { + self.icmp(IntPredicate::IntNE, overflow1, overflow2) + } else { + self.or(overflow1, overflow2) + }; + + let holder = self.const_struct( + &[self.const_undef(llty), self.const_undef(self.type_i1())], + false, + ); + let holder = 
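// note: the two chained `*.with.overflow` calls compute `lhs + rhs + carry` (or the
// borrowing subtraction). For unsigned types at most one of the two partial operations
// can wrap, so `or`-ing the flags yields the final carry/borrow; for signed types the
// chained operation overflows iff exactly one partial operation overflows, hence the
// `icmp ne`. The undef struct assembled here becomes the returned `(T, bool)` pair.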
self.insert_value(holder, agg, 0); + let holder = self.insert_value(holder, overflow, 1); + holder + } + sym::mul_double | sym::mul_double_add | sym::mul_double_add2 => { + let single_ty = self.type_ix(width); + let double_ty = self.type_ix(width * 2); + let lhs = self.intcast(args[0].immediate(), double_ty, signed); + let rhs = self.intcast(args[1].immediate(), double_ty, signed); + let mut ret = self.mul(lhs, rhs); + if name == sym::mul_double_add || name == sym::mul_double_add2 { + let carry = self.intcast(args[2].immediate(), double_ty, signed); + ret = self.add(ret, carry) + } + if name == sym::mul_double_add2 { + let carry2 = self.intcast(args[3].immediate(), double_ty, signed); + ret = self.add(ret, carry2); + } + + // note: insignificant part is always treated as unsigned, even if we + // coerce it to signed in the final result to make the intrinsic + // signature simpler + let lo = self.intcast(ret, single_ty, signed); + + let bits = self.const_uint(double_ty, width); + let hi = self.ashr(ret, bits); + let hi = self.intcast(hi, single_ty, signed); + + let holder = self.const_struct( + &[self.const_undef(single_ty), self.const_undef(single_ty)], + false, + ); + let holder = self.insert_value(holder, lo, 0); + let holder = self.insert_value(holder, hi, 1); + holder + } _ => bug!(), }, None => { diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 52780cc6a3a6f..211ce2f2cda2e 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -178,6 +178,34 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { )?; self.write_scalar(val, dest)?; } + sym::add_with_carry | sym::sub_with_carry => { + let l = self.read_immediate(&args[0])?; + let r = self.read_immediate(&args[1])?; + let c = self.read_immediate(&args[2])?; + let (val, overflowed) = self.carrying_arith( + if intrinsic_name == sym::add_with_carry { BinOp::Add } else { BinOp::Sub }, + &l, + &r, + &c, + )?; + self.write_scalar_pair(val, overflowed, dest)?; + } + sym::mul_double | sym::mul_double_add | sym::mul_double_add2 => { + let l = self.read_immediate(&args[0])?; + let r = self.read_immediate(&args[1])?; + let c1 = if intrinsic_name != sym::mul_double { + Some(self.read_immediate(&args[2])?) + } else { + None + }; + let c2 = if intrinsic_name == sym::mul_double_add2 { + Some(self.read_immediate(&args[3])?) + } else { + None + }; + let (lo, hi) = self.mul_double_add2(&l, &r, c1.as_ref(), c2.as_ref())?; + self.write_scalar_pair(lo, hi, dest)?; + } sym::discriminant_value => { let place = self.deref_pointer(&args[0])?; let variant = self.read_discriminant(&place)?; @@ -573,6 +601,106 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { }) } + pub fn carrying_arith( + &self, + mir_op: BinOp, + l: &ImmTy<'tcx, M::Provenance>, + r: &ImmTy<'tcx, M::Provenance>, + c: &ImmTy<'tcx, M::Provenance>, + ) -> InterpResult<'tcx, (Scalar, Scalar)> { + assert_eq!(l.layout.ty, r.layout.ty); + assert_matches!(l.layout.ty.kind(), ty::Int(..) 
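// note: `carrying_arith` mirrors the library fallback for `carrying_add`/`borrowing_sub`:
// the bool carry is reinterpreted as a 0/1 value of the operand type, two overflowing
// operations are chained, and the flags are combined with `!=` for signed types and `|`
// for unsigned ones, matching the codegen above.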
| ty::Uint(..)); + assert_matches!(c.layout.ty.kind(), ty::Bool); + assert_matches!(mir_op, BinOp::Add | BinOp::Sub); + + let mir_op = mir_op.wrapping_to_overflowing().unwrap(); + + let (val, overflowed1) = self.binary_op(mir_op, l, r)?.to_scalar_pair(); + + let val = ImmTy::from_scalar(val, l.layout); + let c = ImmTy::from_scalar(c.to_scalar(), l.layout); + + let (val, overflowed2) = self.binary_op(mir_op, &val, &c)?.to_scalar_pair(); + + let overflowed1 = overflowed1.to_bool()?; + let overflowed2 = overflowed2.to_bool()?; + + let overflowed = Scalar::from_bool(if l.layout.abi.is_signed() { + overflowed1 != overflowed2 + } else { + overflowed1 | overflowed2 + }); + + interp_ok((val, overflowed)) + } + + pub fn mul_double_add2( + &self, + l: &ImmTy<'tcx, M::Provenance>, + r: &ImmTy<'tcx, M::Provenance>, + c1: Option<&ImmTy<'tcx, M::Provenance>>, + c2: Option<&ImmTy<'tcx, M::Provenance>>, + ) -> InterpResult<'tcx, (Scalar, Scalar)> { + assert_eq!(l.layout.ty, r.layout.ty); + assert_matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..)); + + let is_signed = l.layout.abi.is_signed(); + let size = l.layout.size; + let bits = size.bits(); + let l = l.to_scalar_int()?; + let r = r.to_scalar_int()?; + + interp_ok(if is_signed { + let l = l.to_int(size); + let r = r.to_int(size); + let c1 = c1.map_or(interp_ok(0), |c1| interp_ok(c1.to_scalar_int()?.to_int(size)))?; + let c2 = c2.map_or(interp_ok(0), |c2| interp_ok(c2.to_scalar_int()?.to_int(size)))?; + if bits == 128 { + #[cfg(bootstrap)] + { + let _ = (l, r, c1, c2); + unimplemented!() + } + #[cfg(not(bootstrap))] + { + let (lo, hi) = l.carrying2_mul(r, c1, c2); + let lo = Scalar::from_uint(lo, size); + let hi = Scalar::from_int(hi, size); + (lo, hi) + } + } else { + let prod = l * r + c1 + c2; + let lo = Scalar::from_int(prod, size); + let hi = Scalar::from_int(prod >> size.bits(), size); + (lo, hi) + } + } else { + let l = l.to_uint(size); + let r = r.to_uint(size); + let c1 = c1.map_or(interp_ok(0), |c1| interp_ok(c1.to_scalar_int()?.to_uint(size)))?; + let c2 = c2.map_or(interp_ok(0), |c2| interp_ok(c2.to_scalar_int()?.to_uint(size)))?; + if bits == 128 { + #[cfg(bootstrap)] + { + let _ = (l, r, c1, c2); + unimplemented!() + } + #[cfg(not(bootstrap))] + { + let (lo, hi) = l.carrying2_mul(r, c1, c2); + let lo = Scalar::from_uint(lo, size); + let hi = Scalar::from_uint(hi, size); + (lo, hi) + } + } else { + let prod = l * r + c1 + c2; + let lo = Scalar::from_uint(prod, size); + let hi = Scalar::from_uint(prod >> size.bits(), size); + (lo, hi) + } + }) + } + /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its /// allocation. 
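// note on `mul_double_add2` above: for operand widths below 128 bits the whole
// computation is done in `i128`/`u128` and split into halves with a shift; 128-bit
// operands have no wider type to fall back on, so they defer to the library's
// `carrying2_mul`. The result always fits in the `(lo, hi)` pair: for unsigned values,
// `MAX * MAX + MAX + MAX == 2^(2 * BITS) - 1` exactly.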
pub fn ptr_offset_inbounds( diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 81b926a1b65fa..a717f5427d296 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -626,6 +626,17 @@ where self.write_immediate(Immediate::Scalar(val.into()), dest) } + /// Write a scalar pair to a place + #[inline(always)] + pub fn write_scalar_pair( + &mut self, + val1: impl Into>, + val2: impl Into>, + dest: &impl Writeable<'tcx, M::Provenance>, + ) -> InterpResult<'tcx> { + self.write_immediate(Immediate::ScalarPair(val1.into(), val2.into()), dest) + } + /// Write a pointer to a place #[inline(always)] pub fn write_pointer( diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index cbe8a043fba00..490cda41ecf40 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -5,6 +5,7 @@ #![cfg_attr(not(bootstrap), warn(unqualified_local_imports))] #![doc(rust_logo)] #![feature(assert_matches)] +#![feature(bigint_helper_methods)] #![feature(box_patterns)] #![feature(decl_macro)] #![feature(if_let_guard)] diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index 25e219ef3f23a..36aca1a9e7943 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -103,6 +103,11 @@ pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) - | sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow + | sym::add_with_carry + | sym::sub_with_carry + | sym::mul_double + | sym::mul_double_add + | sym::mul_double_add2 | sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul @@ -433,6 +438,28 @@ pub fn check_intrinsic_type( (1, 0, vec![param(0), param(0)], Ty::new_tup(tcx, &[param(0), tcx.types.bool])) } + sym::add_with_carry | sym::sub_with_carry => ( + 1, + 0, + vec![param(0), param(0), tcx.types.bool], + Ty::new_tup(tcx, &[param(0), tcx.types.bool]), + ), + + sym::mul_double => { + (1, 0, vec![param(0), param(0)], Ty::new_tup(tcx, &[param(0), param(0)])) + } + + sym::mul_double_add => { + (1, 0, vec![param(0), param(0), param(0)], Ty::new_tup(tcx, &[param(0), param(0)])) + } + + sym::mul_double_add2 => ( + 1, + 0, + vec![param(0), param(0), param(0), param(0)], + Ty::new_tup(tcx, &[param(0), param(0)]), + ), + sym::ptr_guaranteed_cmp => ( 1, 0, diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 1527600e764c0..42e7c66b57052 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -377,6 +377,7 @@ symbols! { abort, add, add_assign, + add_with_carry, add_with_overflow, address, adt_const_params, @@ -1276,6 +1277,9 @@ symbols! { move_size_limit, mul, mul_assign, + mul_double, + mul_double_add, + mul_double_add2, mul_with_overflow, multiple_supertrait_upcastable, must_not_suspend, @@ -1918,6 +1922,7 @@ symbols! { structural_peq, sub, sub_assign, + sub_with_carry, sub_with_overflow, suggestion, surface_async_drop_in_place, diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index d7a2f1909cabe..5db9f0ed3b40d 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -2444,6 +2444,80 @@ extern "rust-intrinsic" { #[rustc_nounwind] pub fn mul_with_overflow(x: T, y: T) -> (T, bool); + /// Performs integer addition with an explicit carry flag. 
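    ///
    /// Returns the wrapping sum `x + y + z` (with the carry treated as 0 or 1) together
    /// with a flag reporting whether the full addition overflowed: the carry-out for
    /// unsigned types, signed overflow for signed types.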
+ /// + /// Note that, unlike most intrinsics, this is safe to call; + /// it does not require an `unsafe` block. + /// Therefore, implementations must not require the user to uphold + /// any safety invariants. + /// + /// This intrinsic does not have a stable counterpart. + #[cfg(not(bootstrap))] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[unstable(feature = "core_intrinsics", issue = "none")] + #[rustc_safe_intrinsic] + #[rustc_nounwind] + pub fn add_with_carry(x: T, y: T, z: bool) -> (T, bool); + + /// Performs integer subtraction with an explicit carry flag. + /// + /// Note that, unlike most intrinsics, this is safe to call; + /// it does not require an `unsafe` block. + /// Therefore, implementations must not require the user to uphold + /// any safety invariants. + /// + /// This intrinsic does not have a stable counterpart. + #[cfg(not(bootstrap))] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[unstable(feature = "core_intrinsics", issue = "none")] + #[rustc_safe_intrinsic] + #[rustc_nounwind] + pub fn sub_with_carry(x: T, y: T, z: bool) -> (T, bool); + + /// Performs double-wide integer multiplication. + /// + /// Note that, unlike most intrinsics, this is safe to call; + /// it does not require an `unsafe` block. + /// Therefore, implementations must not require the user to uphold + /// any safety invariants. + /// + /// This intrinsic does not have a stable counterpart. + #[cfg(not(bootstrap))] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[unstable(feature = "core_intrinsics", issue = "none")] + #[rustc_safe_intrinsic] + #[rustc_nounwind] + pub fn mul_double(x: T, y: T) -> (T, T); + + /// Performs double-wide integer multiplication with an extra addition. + /// + /// Note that, unlike most intrinsics, this is safe to call; + /// it does not require an `unsafe` block. + /// Therefore, implementations must not require the user to uphold + /// any safety invariants. + /// + /// This intrinsic does not have a stable counterpart. + #[cfg(not(bootstrap))] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[unstable(feature = "core_intrinsics", issue = "none")] + #[rustc_safe_intrinsic] + #[rustc_nounwind] + pub fn mul_double_add(x: T, y: T, z: T) -> (T, T); + /// Performs double-wide integer multiplication with an extra two additions. + /// + /// Note that, unlike most intrinsics, this is safe to call; + /// it does not require an `unsafe` block. + /// Therefore, implementations must not require the user to uphold + /// any safety invariants. + /// + /// This intrinsic does not have a stable counterpart. + #[cfg(not(bootstrap))] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[unstable(feature = "core_intrinsics", issue = "none")] + #[rustc_safe_intrinsic] + #[rustc_nounwind] + pub fn mul_double_add2(x: T, y: T, z: T, w: T) -> (T, T); + /// Performs an exact division, resulting in undefined behavior where /// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1` /// diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index 7241b3ff6a3b7..915edbaac0127 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -2376,11 +2376,16 @@ macro_rules! 
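// note: outside of bootstrap, the carrying/borrowing additions and the widening/carrying
// multiplications below now forward to the new intrinsics; the pure-Rust fallbacks are
// kept only for the bootstrap compiler, which predates them.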
int_impl { without modifying the original"] #[inline] pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) { - // note: longer-term this should be done via an intrinsic. - // note: no intermediate overflow is required (https://github.com/rust-lang/rust/issues/85532#issuecomment-1032214946). - let (a, b) = self.overflowing_add(rhs); - let (c, d) = a.overflowing_add(carry as $SelfT); - (c, b != d) + #[cfg(bootstrap)] + { + let (a, b) = self.overflowing_add(rhs); + let (c, d) = a.overflowing_add(carry as $SelfT); + (c, b != d) + } + #[cfg(not(bootstrap))] + { + intrinsics::add_with_carry(self, rhs, carry) + } } /// Calculates `self` + `rhs` with an unsigned `rhs`. @@ -2484,11 +2489,16 @@ macro_rules! int_impl { without modifying the original"] #[inline] pub const fn borrowing_sub(self, rhs: Self, borrow: bool) -> (Self, bool) { - // note: longer-term this should be done via an intrinsic. - // note: no intermediate overflow is required (https://github.com/rust-lang/rust/issues/85532#issuecomment-1032214946). - let (a, b) = self.overflowing_sub(rhs); - let (c, d) = a.overflowing_sub(borrow as $SelfT); - (c, b != d) + #[cfg(bootstrap)] + { + let (a, b) = self.overflowing_sub(rhs); + let (c, d) = a.overflowing_sub(borrow as $SelfT); + (c, b != d) + } + #[cfg(not(bootstrap))] + { + intrinsics::sub_with_carry(self, rhs, borrow) + } } /// Calculates `self` - `rhs` with an unsigned `rhs` @@ -2540,6 +2550,120 @@ macro_rules! int_impl { (a as Self, b) } + /// Calculates the complete product `self * rhs` without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// If you also need to add a carry to the wide result, then you want + /// [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `i32` is used here. + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5i32.widening_mul(-2), (4294967286, -1)); + /// assert_eq!(1_000_000_000i32.widening_mul(-10), (2884901888, -3)); + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + #[cfg(not(bootstrap))] + pub const fn widening_mul(self, rhs: Self) -> ($UnsignedT, Self) { + let (lo, hi) = intrinsics::mul_double(self, rhs); + (lo as $UnsignedT, hi) + } + + /// Calculates the "full multiplication" `self * rhs + carry` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `i32` is used here. 
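        ///
        /// The two return values encode the result the same way as [`Self::widening_mul`]:
        /// for the `i32` examples below, the mathematical value of `self * rhs + carry` is
        /// `hi * 2^32 + lo` with `lo` taken as a `u32`, so `-10` comes back as
        /// `(4294967286, -1)`.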
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5i32.carrying_mul(-2, 0), (4294967286, -1)); + /// assert_eq!(5i32.carrying_mul(-2, 10), (0, 0)); + /// assert_eq!(1_000_000_000i32.carrying_mul(-10, 0), (2884901888, -3)); + /// assert_eq!(1_000_000_000i32.carrying_mul(-10, 10), (2884901898, -3)); + #[doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(", stringify!($SelfT), "::MAX.unsigned_abs() + 1, ", stringify!($SelfT), "::MAX / 2));" + )] + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + #[cfg(not(bootstrap))] + pub const fn carrying_mul(self, rhs: Self, carry: Self) -> ($UnsignedT, Self) { + let (lo, hi) = intrinsics::mul_double_add(self, rhs, carry); + (lo as $UnsignedT, hi) + } + + /// Calculates the "full multiplication" `self * rhs + carry1 + carry2` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need either `carry`, then you can use [`Self::widening_mul`] instead, + /// and if you only need one `carry`, then you can use [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `i32` is used here. + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5i32.carrying2_mul(-2, 0, 0), (4294967286, -1)); + /// assert_eq!(5i32.carrying2_mul(-2, 10, 10), (10, 0)); + /// assert_eq!(1_000_000_000i32.carrying2_mul(-10, 0, 0), (2884901888, -3)); + /// assert_eq!(1_000_000_000i32.carrying2_mul(-10, 10, 10), (2884901908, -3)); + #[doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying2_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(", stringify!($UnsignedT), "::MAX, ", stringify!($SelfT), "::MAX / 2));" + )] + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + #[cfg(not(bootstrap))] + pub const fn carrying2_mul(self, rhs: Self, carry1: Self, carry2: Self) -> ($UnsignedT, Self) { + let (lo, hi) = intrinsics::mul_double_add2(self, rhs, carry1, carry2); + (lo as $UnsignedT, hi) + } + /// Calculates the divisor when `self` is divided by `rhs`. /// /// Returns a tuple of the divisor along with a boolean indicating whether an arithmetic overflow would diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index 31e35015d2de1..bf3136ddfad9a 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -156,136 +156,6 @@ macro_rules! midpoint_impl { }; } -macro_rules! 
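// note: the unsigned-only `widening_impl` macro below is removed; its methods move into
// `uint_impl` (with new signed counterparts in `int_impl`) backed by the `mul_double*`
// intrinsics, which also covers `u128`/`i128`, something the old "compute in a wider
// type" trick could not do.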
widening_impl { - ($SelfT:ty, $WideT:ty, $BITS:literal, unsigned) => { - /// Calculates the complete product `self * rhs` without the possibility to overflow. - /// - /// This returns the low-order (wrapping) bits and the high-order (overflow) bits - /// of the result as two separate values, in that order. - /// - /// If you also need to add a carry to the wide result, then you want - /// [`Self::carrying_mul`] instead. - /// - /// # Examples - /// - /// Basic usage: - /// - /// Please note that this example is shared between integer types. - /// Which explains why `u32` is used here. - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// assert_eq!(5u32.widening_mul(2), (10, 0)); - /// assert_eq!(1_000_000_000u32.widening_mul(10), (1410065408, 2)); - /// ``` - #[unstable(feature = "bigint_helper_methods", issue = "85532")] - #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] - #[inline] - pub const fn widening_mul(self, rhs: Self) -> (Self, Self) { - // note: longer-term this should be done via an intrinsic, - // but for now we can deal without an impl for u128/i128 - // SAFETY: overflow will be contained within the wider types - let wide = unsafe { (self as $WideT).unchecked_mul(rhs as $WideT) }; - (wide as $SelfT, (wide >> $BITS) as $SelfT) - } - - /// Calculates the "full multiplication" `self * rhs + carry` - /// without the possibility to overflow. - /// - /// This returns the low-order (wrapping) bits and the high-order (overflow) bits - /// of the result as two separate values, in that order. - /// - /// Performs "long multiplication" which takes in an extra amount to add, and may return an - /// additional amount of overflow. This allows for chaining together multiple - /// multiplications to create "big integers" which represent larger values. - /// - /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead. - /// - /// # Examples - /// - /// Basic usage: - /// - /// Please note that this example is shared between integer types. - /// Which explains why `u32` is used here. - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// assert_eq!(5u32.carrying_mul(2, 0), (10, 0)); - /// assert_eq!(5u32.carrying_mul(2, 10), (20, 0)); - /// assert_eq!(1_000_000_000u32.carrying_mul(10, 0), (1410065408, 2)); - /// assert_eq!(1_000_000_000u32.carrying_mul(10, 10), (1410065418, 2)); - #[doc = concat!("assert_eq!(", - stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", - "(0, ", stringify!($SelfT), "::MAX));" - )] - /// ``` - /// - /// This is the core operation needed for scalar multiplication when - /// implementing it for wider-than-native types. 
- /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// fn scalar_mul_eq(little_endian_digits: &mut Vec, multiplicand: u16) { - /// let mut carry = 0; - /// for d in little_endian_digits.iter_mut() { - /// (*d, carry) = d.carrying_mul(multiplicand, carry); - /// } - /// if carry != 0 { - /// little_endian_digits.push(carry); - /// } - /// } - /// - /// let mut v = vec![10, 20]; - /// scalar_mul_eq(&mut v, 3); - /// assert_eq!(v, [30, 60]); - /// - /// assert_eq!(0x87654321_u64 * 0xFEED, 0x86D3D159E38D); - /// let mut v = vec![0x4321, 0x8765]; - /// scalar_mul_eq(&mut v, 0xFEED); - /// assert_eq!(v, [0xE38D, 0xD159, 0x86D3]); - /// ``` - /// - /// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul), - /// except that it gives the value of the overflow instead of just whether one happened: - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// let r = u8::carrying_mul(7, 13, 0); - /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(7, 13)); - /// let r = u8::carrying_mul(13, 42, 0); - /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(13, 42)); - /// ``` - /// - /// The value of the first field in the returned tuple matches what you'd get - /// by combining the [`wrapping_mul`](Self::wrapping_mul) and - /// [`wrapping_add`](Self::wrapping_add) methods: - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// assert_eq!( - /// 789_u16.carrying_mul(456, 123).0, - /// 789_u16.wrapping_mul(456).wrapping_add(123), - /// ); - /// ``` - #[unstable(feature = "bigint_helper_methods", issue = "85532")] - #[rustc_const_unstable(feature = "bigint_helper_methods", issue = "85532")] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] - #[inline] - pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) { - // note: longer-term this should be done via an intrinsic, - // but for now we can deal without an impl for u128/i128 - // SAFETY: overflow will be contained within the wider types - let wide = unsafe { - (self as $WideT).unchecked_mul(rhs as $WideT).unchecked_add(carry as $WideT) - }; - (wide as $SelfT, (wide >> $BITS) as $SelfT) - } - }; -} - impl i8 { int_impl! { Self = i8, @@ -478,6 +348,39 @@ impl isize { /// If the 6th bit is set ascii is lower case. const ASCII_CASE_MASK: u8 = 0b0010_0000; +// NOTE: this only exists because bootstrap does not have the intrinsic but the code is still used +#[cfg(bootstrap)] +macro_rules! widening_mul { + ($lo:ident) => {}; + ($lo:ident $hi:ident $($rest:ident)*) => { + widening_mul!($hi $($rest)*); + + impl $lo { + const fn widening_mul_impl(self, rhs: $lo) -> ($lo, $lo) { + let wide = (self as $hi) * (rhs as $hi); + let lo = wide as $lo; + let hi = (wide >> <$lo>::BITS) as $lo; + (lo, hi) + } + } + }; +} +#[cfg(bootstrap)] +widening_mul!(u8 u16 u32 u64 u128); +// NOTE: but the bootstrap doesn't use these, so, we can cheat :) +#[cfg(bootstrap)] +impl u128 { + const fn widening_mul_impl(self, _: u128) -> (u128, u128) { + unimplemented!() + } +} +#[cfg(bootstrap)] +impl usize { + const fn widening_mul_impl(self, _: usize) -> (usize, usize) { + unimplemented!() + } +} + impl u8 { uint_impl! { Self = u8, @@ -498,7 +401,6 @@ impl u8 { from_xe_bytes_doc = "", bound_condition = "", } - widening_impl! { u8, u16, 8, unsigned } midpoint_impl! { u8, u16, unsigned } /// Checks if the value is within the ASCII range. @@ -1115,7 +1017,6 @@ impl u16 { from_xe_bytes_doc = "", bound_condition = "", } - widening_impl! { u16, u32, 16, unsigned } midpoint_impl! 
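// note on the `cfg(bootstrap)` helpers above: the recursive `widening_mul!` macro pairs
// each listed type with the next wider one as its double-width type; the `u128`/`usize`
// stubs exist only to keep the bootstrap build compiling, since bootstrap never calls them.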
{ u16, u32, unsigned } /// Checks if the value is a Unicode surrogate code point, which are disallowed values for [`char`]. @@ -1164,7 +1065,6 @@ impl u32 { from_xe_bytes_doc = "", bound_condition = "", } - widening_impl! { u32, u64, 32, unsigned } midpoint_impl! { u32, u64, unsigned } } @@ -1188,7 +1088,6 @@ impl u64 { from_xe_bytes_doc = "", bound_condition = "", } - widening_impl! { u64, u128, 64, unsigned } midpoint_impl! { u64, u128, unsigned } } @@ -1238,7 +1137,6 @@ impl usize { from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), bound_condition = " on 16-bit targets", } - widening_impl! { usize, u32, 16, unsigned } midpoint_impl! { usize, u32, unsigned } } @@ -1263,7 +1161,6 @@ impl usize { from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), bound_condition = " on 32-bit targets", } - widening_impl! { usize, u64, 32, unsigned } midpoint_impl! { usize, u64, unsigned } } @@ -1288,7 +1185,6 @@ impl usize { from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), bound_condition = " on 64-bit targets", } - widening_impl! { usize, u128, 64, unsigned } midpoint_impl! { usize, u128, unsigned } } diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index d9036abecc592..7afda7954dfab 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -2297,11 +2297,16 @@ macro_rules! uint_impl { without modifying the original"] #[inline] pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) { - // note: longer-term this should be done via an intrinsic, but this has been shown - // to generate optimal code for now, and LLVM doesn't have an equivalent intrinsic - let (a, b) = self.overflowing_add(rhs); - let (c, d) = a.overflowing_add(carry as $SelfT); - (c, b || d) + #[cfg(bootstrap)] + { + let (a, b) = self.overflowing_add(rhs); + let (c, d) = a.overflowing_add(carry as $SelfT); + (c, b || d) + } + #[cfg(not(bootstrap))] + { + intrinsics::add_with_carry(self, rhs, carry) + } } /// Calculates `self` + `rhs` with a signed `rhs`. @@ -2389,11 +2394,16 @@ macro_rules! uint_impl { without modifying the original"] #[inline] pub const fn borrowing_sub(self, rhs: Self, borrow: bool) -> (Self, bool) { - // note: longer-term this should be done via an intrinsic, but this has been shown - // to generate optimal code for now, and LLVM doesn't have an equivalent intrinsic - let (a, b) = self.overflowing_sub(rhs); - let (c, d) = a.overflowing_sub(borrow as $SelfT); - (c, b || d) + #[cfg(bootstrap)] + { + let (a, b) = self.overflowing_sub(rhs); + let (c, d) = a.overflowing_sub(borrow as $SelfT); + (c, b || d) + } + #[cfg(not(bootstrap))] + { + intrinsics::sub_with_carry(self, rhs, borrow) + } } /// Computes the absolute difference between `self` and `other`. @@ -2452,6 +2462,181 @@ macro_rules! uint_impl { (a as Self, b) } + + /// Calculates the complete product `self * rhs` without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// If you also need to add a carry to the wide result, then you want + /// [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `u32` is used here. 
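        ///
        /// The pair `(lo, hi)` satisfies `self * rhs == hi * 2^32 + lo` for the `u32`
        /// examples below; for instance, `1_000_000_000 * 10 == 2 * 2^32 + 1_410_065_408`.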
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5u32.widening_mul(2), (10, 0)); + /// assert_eq!(1_000_000_000u32.widening_mul(10), (1410065408, 2)); + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn widening_mul(self, rhs: Self) -> (Self, Self) { + #[cfg(bootstrap)] + { + self.widening_mul_impl(rhs) + } + #[cfg(not(bootstrap))] + { + intrinsics::mul_double(self, rhs) + } + } + + /// Calculates the "full multiplication" `self * rhs + carry` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `u32` is used here. + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5u32.carrying_mul(2, 0), (10, 0)); + /// assert_eq!(5u32.carrying_mul(2, 10), (20, 0)); + /// assert_eq!(1_000_000_000u32.carrying_mul(10, 0), (1410065408, 2)); + /// assert_eq!(1_000_000_000u32.carrying_mul(10, 10), (1410065418, 2)); + #[cfg_attr(not(bootstrap), doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(0, ", stringify!($SelfT), "::MAX));" + ))] + /// ``` + /// + /// This is a core operation needed for scalar multiplication when + /// implementing it for wider-than-native types. 
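        ///
        /// This works because `digit * multiplicand + carry` can never overflow the
        /// double-width result (it is at most `MAX * MAX + MAX`), so no information is
        /// lost between iterations of the loop below.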
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// fn scalar_mul_eq(little_endian_digits: &mut Vec, multiplicand: u16) { + /// let mut carry = 0; + /// for d in little_endian_digits.iter_mut() { + /// (*d, carry) = d.carrying_mul(multiplicand, carry); + /// } + /// if carry != 0 { + /// little_endian_digits.push(carry); + /// } + /// } + /// + /// let mut v = vec![10, 20]; + /// scalar_mul_eq(&mut v, 3); + /// assert_eq!(v, [30, 60]); + /// + /// assert_eq!(0x87654321_u64 * 0xFEED, 0x86D3D159E38D); + /// let mut v = vec![0x4321, 0x8765]; + /// scalar_mul_eq(&mut v, 0xFEED); + /// assert_eq!(v, [0xE38D, 0xD159, 0x86D3]); + /// ``` + /// + /// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul), + /// except that it gives the value of the overflow instead of just whether one happened: + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// let r = u8::carrying_mul(7, 13, 0); + /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(7, 13)); + /// let r = u8::carrying_mul(13, 42, 0); + /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(13, 42)); + /// ``` + /// + /// The value of the first field in the returned tuple matches what you'd get + /// by combining the [`wrapping_mul`](Self::wrapping_mul) and + /// [`wrapping_add`](Self::wrapping_add) methods: + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!( + /// 789_u16.carrying_mul(456, 123).0, + /// 789_u16.wrapping_mul(456).wrapping_add(123), + /// ); + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) { + #[cfg(bootstrap)] + { + let (lo, hi) = self.widening_mul(rhs); + let (lo, carry) = lo.overflowing_add(carry); + let hi = hi + (carry as $SelfT); + (lo, hi) + } + #[cfg(not(bootstrap))] + { + intrinsics::mul_double_add(self, rhs, carry) + } + } + + /// Calculates the "full multiplication" `self * rhs + carry1 + carry2` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need either `carry`, then you can use [`Self::widening_mul`] instead, + /// and if you only need one `carry`, then you can use [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `u32` is used here. 
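        ///
        /// Two carries are the most the double-width result can absorb without
        /// overflowing, since `MAX * MAX + MAX + MAX` is exactly the all-ones
        /// double-width value, as the final example below shows.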
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5u32.carrying2_mul(2, 0, 0), (10, 0)); + /// assert_eq!(5u32.carrying2_mul(2, 10, 10), (30, 0)); + /// assert_eq!(1_000_000_000u32.carrying2_mul(10, 0, 0), (1410065408, 2)); + /// assert_eq!(1_000_000_000u32.carrying2_mul(10, 10, 10), (1410065428, 2)); + #[doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying2_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX));" + )] + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + #[cfg(not(bootstrap))] + pub const fn carrying2_mul(self, rhs: Self, carry1: Self, carry2: Self) -> (Self, Self) { + intrinsics::mul_double_add2(self, rhs, carry1, carry2) + } + /// Calculates the divisor when `self` is divided by `rhs`. /// /// Returns a tuple of the divisor along with a boolean indicating diff --git a/library/core/tests/num/i128.rs b/library/core/tests/num/i128.rs index 1ddd20f33d0b1..745fee05164c9 100644 --- a/library/core/tests/num/i128.rs +++ b/library/core/tests/num/i128.rs @@ -1 +1 @@ -int_module!(i128); +int_module!(i128, u128); diff --git a/library/core/tests/num/i16.rs b/library/core/tests/num/i16.rs index c7aa9fff964ed..6acb8371b87d8 100644 --- a/library/core/tests/num/i16.rs +++ b/library/core/tests/num/i16.rs @@ -1 +1 @@ -int_module!(i16); +int_module!(i16, u16); diff --git a/library/core/tests/num/i32.rs b/library/core/tests/num/i32.rs index efd5b1596a80d..38d5071f71d6c 100644 --- a/library/core/tests/num/i32.rs +++ b/library/core/tests/num/i32.rs @@ -1,4 +1,4 @@ -int_module!(i32); +int_module!(i32, u32); #[test] fn test_arith_operation() { diff --git a/library/core/tests/num/i64.rs b/library/core/tests/num/i64.rs index 93d23c10adf7e..f8dd5f9be7fe2 100644 --- a/library/core/tests/num/i64.rs +++ b/library/core/tests/num/i64.rs @@ -1 +1 @@ -int_module!(i64); +int_module!(i64, u64); diff --git a/library/core/tests/num/i8.rs b/library/core/tests/num/i8.rs index 887d4f17d25ff..a10906618c937 100644 --- a/library/core/tests/num/i8.rs +++ b/library/core/tests/num/i8.rs @@ -1 +1 @@ -int_module!(i8); +int_module!(i8, u8); diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs index 6a26bd15a663d..0505883c09229 100644 --- a/library/core/tests/num/int_macros.rs +++ b/library/core/tests/num/int_macros.rs @@ -1,5 +1,5 @@ macro_rules! int_module { - ($T:ident) => { + ($T:ident, $U:ident) => { use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr}; use core::$T::*; @@ -365,6 +365,66 @@ macro_rules! 
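// note: the signed test module now also takes the corresponding unsigned type, because
// `widening_mul`/`carrying_mul`/`carrying2_mul` return the low half as an unsigned value.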
int_module { assert_eq!((0 as $T).borrowing_sub($T::MIN, true), ($T::MAX, false)); } + #[test] + #[cfg(not(bootstrap))] + fn test_widening_mul() { + assert_eq!($T::MAX.widening_mul($T::MAX), (1, $T::MAX / 2)); + assert_eq!($T::MIN.widening_mul($T::MAX), ($T::MIN as $U, $T::MIN / 2)); + assert_eq!($T::MIN.widening_mul($T::MIN), (0, $T::MAX / 2 + 1)); + } + + #[test] + #[cfg(not(bootstrap))] + fn test_carrying_mul() { + assert_eq!($T::MAX.carrying_mul($T::MAX, 0), (1, $T::MAX / 2)); + assert_eq!($T::MAX.carrying_mul($T::MAX, $T::MAX), ($U::MAX / 2 + 1, $T::MAX / 2)); + assert_eq!($T::MAX.carrying_mul($T::MAX, $T::MIN), ($U::MAX / 2 + 2, $T::MAX / 2 - 1)); + assert_eq!($T::MIN.carrying_mul($T::MAX, 0), ($T::MIN as $U, $T::MIN / 2)); + assert_eq!($T::MIN.carrying_mul($T::MAX, $T::MAX), ($U::MAX, $T::MIN / 2)); + assert_eq!($T::MIN.carrying_mul($T::MAX, $T::MIN), (0, $T::MIN / 2)); + assert_eq!($T::MIN.carrying_mul($T::MIN, 0), (0, $T::MAX / 2 + 1)); + assert_eq!($T::MIN.carrying_mul($T::MIN, $T::MAX), ($U::MAX / 2, $T::MAX / 2 + 1)); + assert_eq!($T::MIN.carrying_mul($T::MIN, $T::MIN), ($U::MAX / 2 + 1, $T::MAX / 2)); + } + + #[test] + #[cfg(not(bootstrap))] + fn test_carrying2_mul() { + assert_eq!($T::MAX.carrying2_mul($T::MAX, 0, 0), (1, $T::MAX / 2)); + assert_eq!($T::MAX.carrying2_mul($T::MAX, $T::MAX, 0), ($U::MAX / 2 + 1, $T::MAX / 2)); + assert_eq!( + $T::MAX.carrying2_mul($T::MAX, $T::MIN, 0), + ($U::MAX / 2 + 2, $T::MAX / 2 - 1) + ); + assert_eq!($T::MAX.carrying2_mul($T::MAX, $T::MAX, $T::MAX), ($U::MAX, $T::MAX / 2)); + assert_eq!($T::MAX.carrying2_mul($T::MAX, $T::MAX, $T::MIN), (0, $T::MAX / 2)); + assert_eq!($T::MAX.carrying2_mul($T::MAX, $T::MIN, $T::MIN), (1, $T::MAX / 2 - 1)); + assert_eq!($T::MIN.carrying2_mul($T::MAX, 0, 0), ($T::MIN as $U, $T::MIN / 2)); + assert_eq!($T::MIN.carrying2_mul($T::MAX, $T::MAX, 0), ($U::MAX, $T::MIN / 2)); + assert_eq!($T::MIN.carrying2_mul($T::MAX, $T::MIN, 0), (0, $T::MIN / 2)); + assert_eq!( + $T::MIN.carrying2_mul($T::MAX, $T::MAX, $T::MAX), + ($U::MAX / 2 - 1, $T::MIN / 2 + 1) + ); + assert_eq!( + $T::MIN.carrying2_mul($T::MAX, $T::MAX, $T::MIN), + ($U::MAX / 2, $T::MIN / 2) + ); + assert_eq!( + $T::MIN.carrying2_mul($T::MAX, $T::MIN, $T::MIN), + ($U::MAX / 2 + 1, $T::MIN / 2 - 1) + ); + assert_eq!($T::MIN.carrying2_mul($T::MIN, 0, 0), (0, $T::MAX / 2 + 1)); + assert_eq!($T::MIN.carrying2_mul($T::MIN, $T::MAX, 0), ($U::MAX / 2, $T::MAX / 2 + 1)); + assert_eq!($T::MIN.carrying2_mul($T::MIN, $T::MIN, 0), ($U::MAX / 2 + 1, $T::MAX / 2)); + assert_eq!( + $T::MIN.carrying2_mul($T::MIN, $T::MAX, $T::MAX), + ($U::MAX - 1, $T::MAX / 2 + 1) + ); + assert_eq!($T::MIN.carrying2_mul($T::MIN, $T::MAX, $T::MIN), ($U::MAX, $T::MAX / 2)); + assert_eq!($T::MIN.carrying2_mul($T::MIN, $T::MIN, $T::MIN), (0, $T::MAX / 2)); + } + #[test] fn test_midpoint() { assert_eq!(<$T>::midpoint(1, 3), 2); diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs index f4fa789461eb8..4f1a2fa157a53 100644 --- a/library/core/tests/num/uint_macros.rs +++ b/library/core/tests/num/uint_macros.rs @@ -288,6 +288,27 @@ macro_rules! 
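// note: the unsigned expectations below follow from `MAX * MAX == (MAX - 1) * 2^BITS + 1`,
// so each extra `MAX` carry bumps the pair from `(1, MAX - 1)` to `(0, MAX)` and then to
// `(MAX, MAX)`.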
uint_module { assert_eq!($T::MAX.borrowing_sub($T::MAX, true), ($T::MAX, true)); } + #[cfg(not(bootstrap))] + #[test] + fn test_widening_mul() { + assert_eq!($T::MAX.widening_mul($T::MAX), (1, $T::MAX - 1)); + } + + #[cfg(not(bootstrap))] + #[test] + fn test_carrying_mul() { + assert_eq!($T::MAX.carrying_mul($T::MAX, 0), (1, $T::MAX - 1)); + assert_eq!($T::MAX.carrying_mul($T::MAX, $T::MAX), (0, $T::MAX)); + } + + #[cfg(not(bootstrap))] + #[test] + fn test_carrying2_mul() { + assert_eq!($T::MAX.carrying2_mul($T::MAX, 0, 0), (1, $T::MAX - 1)); + assert_eq!($T::MAX.carrying2_mul($T::MAX, $T::MAX, 0), (0, $T::MAX)); + assert_eq!($T::MAX.carrying2_mul($T::MAX, $T::MAX, $T::MAX), ($T::MAX, $T::MAX)); + } + #[test] fn test_midpoint() { assert_eq!(<$T>::midpoint(1, 3), 2);
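// note: an illustrative, self-contained sketch of how the helpers exercised above chain
// together when multiplying a little-endian multi-limb integer by a scalar. The helper
// name `mul_acc` and its shape are this example's own, not part of the patch; it needs a
// nightly toolchain because `bigint_helper_methods` is still unstable (tracking issue #85532).
#![feature(bigint_helper_methods)]

/// Adds `a * b` into `acc`, where `acc` and `a` are little-endian `u64` limbs of the
/// same length. The caller must size `acc` so that the result fits.
fn mul_acc(acc: &mut [u64], a: &[u64], b: u64) {
    assert_eq!(acc.len(), a.len());
    let mut mul_carry = 0u64; // high half chained between `carrying_mul` calls
    let mut add_carry = false; // carry bit chained between `carrying_add` calls
    for (d, &x) in acc.iter_mut().zip(a) {
        // `x * b + mul_carry` always fits in the `(lo, hi)` pair, so nothing is lost.
        let (lo, hi) = x.carrying_mul(b, mul_carry);
        mul_carry = hi;
        // Fold the low half into the accumulator, propagating the carry bit.
        let (sum, c) = d.carrying_add(lo, add_carry);
        *d = sum;
        add_carry = c;
    }
    // Because `acc` is sized to hold the result, no carry remains after the last limb.
    assert_eq!(mul_carry, 0);
    assert!(!add_carry);
}

fn main() {
    // (2^64 + 2) * 3 == 3 * 2^64 + 6, i.e. limbs [2, 1, 0] * 3 == [6, 3, 0].
    let mut acc = [0u64; 3];
    mul_acc(&mut acc, &[2, 1, 0], 3);
    assert_eq!(acc, [6, 3, 0]);
}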