use crate::{
ext::xmpfr::{self, raw_round},
float::{self, Round, Special},
misc::{NegAbs, UnwrappedCast},
Assign, Float,
};
use az::{Az, WrappingCast};
use core::{
cell::UnsafeCell,
mem::{self, MaybeUninit},
ops::Deref,
ptr::NonNull,
};
use gmp_mpfr_sys::{
gmp::{self, limb_t},
mpfr::{self, exp_t, mpfr_t, prec_t},
};
use libc::c_int;
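// Enough inline limb storage for a 128-bit significand: two 64-bit limbs or
// four 32-bit limbs, depending on how GMP was built.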
const LIMBS_IN_SMALL: usize = (128 / gmp::LIMB_BITS) as usize;
type Limbs = [MaybeUninit<limb_t>; LIMBS_IN_SMALL];
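/// A small float that does not require any memory allocation.
///
/// This can be useful when you have a primitive number type but need a
/// reference to a [`Float`]. The precision is taken from the value used to
/// create the `SmallFloat`: the bit width for integer primitives, 24 for
/// `f32`, 53 for `f64`, and the minimum precision for [`Special`] values.
///
/// # Examples
///
/// A minimal usage sketch, mirroring the tests at the bottom of this file:
///
/// ```rust
/// use rug::float::SmallFloat;
/// // No heap allocation takes place here.
/// let a = SmallFloat::from(3u8);
/// // `SmallFloat` derefs to `Float`, so `Float` methods are available.
/// assert_eq!(*a, 3);
/// assert_eq!(a.prec(), 8);
/// ```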
#[derive(Clone)]
pub struct SmallFloat {
inner: Mpfr,
limbs: Limbs,
}
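// A mirror of `mpfr_t` (layout asserted below) with the limb pointer wrapped
// in an `UnsafeCell`, so that `update_d` can repair `d` through a shared
// reference after the struct has been moved.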
#[repr(C)]
pub struct Mpfr {
pub prec: prec_t,
pub sign: c_int,
pub exp: exp_t,
pub d: UnsafeCell<NonNull<limb_t>>,
}
impl Clone for Mpfr {
fn clone(&self) -> Mpfr {
Mpfr {
prec: self.prec,
sign: self.sign,
exp: self.exp,
d: UnsafeCell::new(unsafe { *self.d.get() }),
}
}
}
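// The limb array must provide exactly 128 bits (16 bytes) of storage, and
// `Mpfr` must be layout-compatible with `mpfr_t` for the pointer casts below.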
static_assert!(mem::size_of::<Limbs>() == 16);
static_assert_same_layout!(Mpfr, mpfr_t);
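// Safety: `SmallFloat` cannot be `Sync` because `update_d` writes to the
// `UnsafeCell` through a shared reference, so concurrent `deref` calls could
// race on that write. It can be `Send` because an owned value has no other
// reference that could touch the cell.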
unsafe impl Send for SmallFloat {}
impl Default for SmallFloat {
#[inline]
fn default() -> Self {
SmallFloat::new()
}
}
impl SmallFloat {
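    /// Creates a [`SmallFloat`] with value zero and minimum precision.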
#[inline]
pub const fn new() -> Self {
SmallFloat {
inner: Mpfr {
prec: float::prec_min() as prec_t,
sign: 1,
exp: xmpfr::EXP_ZERO,
d: UnsafeCell::new(NonNull::dangling()),
},
limbs: small_limbs![],
}
}
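    /// Returns a mutable reference to a [`Float`] for simple operations that
    /// do not need to change the allocation of the number.
    ///
    /// # Safety
    ///
    /// It is undefined behavior to modify the returned [`Float`] in a way
    /// that requires reallocation, for example by raising its precision,
    /// since the limbs live inline in `self` and were not allocated by MPFR.
    ///
    /// A sketch of safe use (`+=` only rounds to the existing precision and
    /// never reallocates):
    ///
    /// ```rust
    /// use rug::float::SmallFloat;
    /// let mut a = SmallFloat::from(1.0f32);
    /// // Safe: in-place addition keeps the precision at 24 bits.
    /// unsafe { *a.as_nonreallocating_float() += 2.0 };
    /// assert_eq!(*a, 3.0);
    /// ```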
#[inline]
pub unsafe fn as_nonreallocating_float(&mut self) -> &mut Float {
self.update_d();
let ptr = cast_ptr_mut!(&mut self.inner, Float);
&mut *ptr
}
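    // Repairs `inner.d` to point at `self.limbs`. A `SmallFloat` can be moved
    // or swapped (see the by-value `Assign` below), which invalidates the
    // stored pointer, so it is refreshed before every use. The write goes
    // through the `UnsafeCell` so that it can happen from `&self` in `deref`.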
#[inline]
fn update_d(&self) {
let d = NonNull::<[MaybeUninit<limb_t>]>::from(&self.limbs[..]);
unsafe {
*self.inner.d.get() = d.cast();
}
}
}
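// `Mpfr` has the same layout as `mpfr_t` (asserted above), which in turn is
// the representation of `Float`, so the pointer cast is sound once `d` has
// been repaired.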
impl Deref for SmallFloat {
type Target = Float;
#[inline]
fn deref(&self) -> &Float {
self.update_d();
let ptr = cast_ptr!(&self.inner, Float);
unsafe { &*ptr }
}
}
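/// Types implementing this trait can be used to assign to or create a
/// [`SmallFloat`]. The trait is sealed through [`SealedToSmall`] and is not
/// intended to be implemented outside this module.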
pub trait ToSmall: SealedToSmall {}
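// Implementation detail of `ToSmall`: writes `self` into the fields behind
// `inner` and into `limbs`. Unsafe because it writes through a raw pointer
// and may leave `limbs` only partially initialized.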
pub trait SealedToSmall: Copy {
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs);
}
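// Signed integers delegate to the unsigned implementation for the absolute
// value, then flip the stored sign if the source was negative.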
macro_rules! unsafe_signed {
($($I:ty)*) => { $(
impl ToSmall for $I {}
impl SealedToSmall for $I {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
let (neg, abs) = self.neg_abs();
abs.copy(inner, limbs);
if neg {
(*inner).sign = -1;
}
}
}
)* };
}
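// Unsigned integers of at most 32 bits. A nonzero value is normalized so that
// its most significant set bit lands at the top of the first limb, with the
// exponent recording the bit length. Worked example for `6u8` (0b110):
// leading = 5, so the limb holds 110 at its very top, and the exponent is
// 8 - 5 = 3, giving 0.110 (binary) * 2^3 = 6.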
macro_rules! unsafe_unsigned_32 {
($U:ty, $bits:expr) => {
impl ToSmall for $U {}
impl SealedToSmall for $U {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
let ptr = cast_ptr_mut!(inner, mpfr_t);
let limbs_ptr = cast_ptr_mut!(limbs.as_mut_ptr(), limb_t);
if self == 0 {
xmpfr::custom_zero(ptr, limbs_ptr, $bits);
} else {
let leading = self.leading_zeros();
let limb_leading = leading + gmp::LIMB_BITS.az::<u32>() - $bits;
limbs[0] = MaybeUninit::new(limb_t::from(self) << limb_leading);
let exp = $bits - leading;
xmpfr::custom_regular(ptr, limbs_ptr, exp.unwrapped_cast(), $bits);
}
}
}
};
}
unsafe_signed! { i8 i16 i32 i64 i128 isize }
unsafe_unsigned_32! { u8, 8 }
unsafe_unsigned_32! { u16, 16 }
unsafe_unsigned_32! { u32, 32 }
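// `u64` needs one 64-bit limb or two 32-bit limbs; limbs are stored least
// significant first, following GMP's convention.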
impl ToSmall for u64 {}
impl SealedToSmall for u64 {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
let ptr = cast_ptr_mut!(inner, mpfr_t);
let limbs_ptr = cast_ptr_mut!(limbs.as_mut_ptr(), limb_t);
if self == 0 {
xmpfr::custom_zero(ptr, limbs_ptr, 64);
} else {
let leading = self.leading_zeros();
let sval = self << leading;
#[cfg(gmp_limb_bits_64)]
{
limbs[0] = MaybeUninit::new(sval);
}
#[cfg(gmp_limb_bits_32)]
{
limbs[0] = MaybeUninit::new(sval.wrapping_cast());
limbs[1] = MaybeUninit::new((sval >> 32).wrapping_cast());
}
xmpfr::custom_regular(ptr, limbs_ptr, (64 - leading).unwrapped_cast(), 64);
}
}
}
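// `u128` fills the whole inline array: two 64-bit limbs or four 32-bit limbs,
// least significant first.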
impl ToSmall for u128 {}
impl SealedToSmall for u128 {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
let ptr = cast_ptr_mut!(inner, mpfr_t);
let limbs_ptr = cast_ptr_mut!(limbs.as_mut_ptr(), limb_t);
if self == 0 {
xmpfr::custom_zero(ptr, limbs_ptr, 128);
} else {
let leading = self.leading_zeros();
let sval = self << leading;
#[cfg(gmp_limb_bits_64)]
{
limbs[0] = MaybeUninit::new(sval.wrapping_cast());
limbs[1] = MaybeUninit::new((sval >> 64).wrapping_cast());
}
#[cfg(gmp_limb_bits_32)]
{
limbs[0] = MaybeUninit::new(sval.wrapping_cast());
limbs[1] = MaybeUninit::new((sval >> 32).wrapping_cast());
limbs[2] = MaybeUninit::new((sval >> 64).wrapping_cast());
limbs[3] = MaybeUninit::new((sval >> 96).wrapping_cast());
}
xmpfr::custom_regular(ptr, limbs_ptr, (128 - leading).unwrapped_cast(), 128);
}
}
}
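// `usize` simply forwards to the implementation matching the pointer width.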
impl ToSmall for usize {}
impl SealedToSmall for usize {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
#[cfg(target_pointer_width = "32")]
{
(self.az::<u32>()).copy(inner, limbs);
}
#[cfg(target_pointer_width = "64")]
{
(self.az::<u64>()).copy(inner, limbs);
}
}
}
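// `f32` is widened to `f64` and stored with 24 bits of precision, so the
// conversion is exact. The explicit sign fix keeps the sign even for NaN,
// which `mpfr::set_d` does not guarantee to preserve.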
impl ToSmall for f32 {}
impl SealedToSmall for f32 {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
let ptr = cast_ptr_mut!(inner, mpfr_t);
let limbs_ptr = cast_ptr_mut!(limbs.as_mut_ptr(), limb_t);
xmpfr::custom_zero(ptr, limbs_ptr, 24);
mpfr::set_d(ptr, self.into(), raw_round(Round::Nearest));
if self.is_sign_negative() {
(*inner).sign = -1;
}
}
}
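// `f64` values carry at most 53 significand bits, so storing them at
// precision 53 with rounding to nearest is likewise exact.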
impl ToSmall for f64 {}
impl SealedToSmall for f64 {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
let ptr = cast_ptr_mut!(inner, mpfr_t);
let limbs_ptr = cast_ptr_mut!(limbs.as_mut_ptr(), limb_t);
xmpfr::custom_zero(ptr, limbs_ptr, 53);
mpfr::set_d(ptr, self, raw_round(Round::Nearest));
if self.is_sign_negative() {
(*inner).sign = -1;
}
}
}
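// Special values (zeros, infinities, NaN) have no significand, so the minimum
// precision suffices.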
impl ToSmall for Special {}
impl SealedToSmall for Special {
#[inline]
unsafe fn copy(self, inner: *mut Mpfr, limbs: &mut Limbs) {
let ptr = cast_ptr_mut!(inner, mpfr_t);
let limbs_ptr = cast_ptr_mut!(limbs.as_mut_ptr(), limb_t);
xmpfr::custom_special(ptr, limbs_ptr, self, float::prec_min().az());
}
}
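// Assigning a primitive overwrites the precision as well as the value, as
// determined by the source type.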
impl<T: ToSmall> Assign<T> for SmallFloat {
#[inline]
fn assign(&mut self, src: T) {
unsafe {
src.copy(&mut self.inner, &mut self.limbs);
}
}
}
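// The placeholder fields are fully overwritten by `copy` before the value is
// ever read, and the `d` pointer is repaired on each access anyway, so moving
// the freshly built parts into `SmallFloat` is fine.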
impl<T: ToSmall> From<T> for SmallFloat {
#[inline]
fn from(src: T) -> Self {
let mut inner = Mpfr {
prec: 0,
sign: 0,
exp: 0,
d: UnsafeCell::new(NonNull::dangling()),
};
let mut limbs = small_limbs![];
unsafe {
src.copy(&mut inner, &mut limbs);
}
SmallFloat { inner, limbs }
}
}
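// Assigning from another `SmallFloat` clones or moves the whole structure;
// the stale `d` pointer this leaves behind is repaired by `update_d` on the
// next access.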
impl Assign<&Self> for SmallFloat {
#[inline]
fn assign(&mut self, other: &Self) {
self.clone_from(other);
}
}
impl Assign for SmallFloat {
#[inline]
fn assign(&mut self, other: Self) {
drop(mem::replace(self, other));
}
}
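// The following helpers read the normalized significand back out of a
// `SmallFloat`. Callers must guarantee enough precision and a normal
// (nonzero, finite) value; the stored limbs keep the most significant bit at
// the top, hence the shifts.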
#[inline]
pub(crate) unsafe fn unchecked_get_unshifted_u8(small: &SmallFloat) -> u8 {
debug_assert!(small.prec() >= 8);
debug_assert!(small.is_normal());
(small.limbs[0].assume_init() >> (gmp::LIMB_BITS - 8)).wrapping_cast()
}
#[inline]
pub(crate) unsafe fn unchecked_get_unshifted_u16(small: &SmallFloat) -> u16 {
debug_assert!(small.prec() >= 16);
debug_assert!(small.is_normal());
(small.limbs[0].assume_init() >> (gmp::LIMB_BITS - 16)).wrapping_cast()
}
#[inline]
pub(crate) unsafe fn unchecked_get_unshifted_u32(small: &SmallFloat) -> u32 {
debug_assert!(small.prec() >= 32);
debug_assert!(small.is_normal());
#[cfg(gmp_limb_bits_32)]
{
small.limbs[0].assume_init()
}
#[cfg(gmp_limb_bits_64)]
{
(small.limbs[0].assume_init() >> 32).wrapping_cast()
}
}
#[inline]
pub(crate) unsafe fn unchecked_get_unshifted_u64(small: &SmallFloat) -> u64 {
debug_assert!(small.prec() >= 64);
debug_assert!(small.is_normal());
#[cfg(gmp_limb_bits_32)]
{
u64::from(small.limbs[0].assume_init()) | (u64::from(small.limbs[1].assume_init()) << 32)
}
#[cfg(gmp_limb_bits_64)]
{
small.limbs[0].assume_init()
}
}
#[inline]
pub(crate) unsafe fn unchecked_get_unshifted_u128(small: &SmallFloat) -> u128 {
debug_assert!(small.prec() >= 128);
debug_assert!(small.is_normal());
#[cfg(gmp_limb_bits_32)]
{
u128::from(small.limbs[0].assume_init())
| (u128::from(small.limbs[1].assume_init()) << 32)
| (u128::from(small.limbs[2].assume_init()) << 64)
| (u128::from(small.limbs[3].assume_init()) << 96)
}
#[cfg(gmp_limb_bits_64)]
{
u128::from(small.limbs[0].assume_init()) | (u128::from(small.limbs[1].assume_init()) << 64)
}
}
#[cfg(test)]
#[allow(clippy::float_cmp)]
mod tests {
use crate::{
float::{self, FreeCache, SmallFloat, Special},
Assign,
};
#[test]
fn check_assign() {
let mut f = SmallFloat::from(-1.0f32);
assert_eq!(*f, -1.0);
f.assign(-2.0f64);
assert_eq!(*f, -2.0);
let other = SmallFloat::from(4u8);
f.assign(&other);
assert_eq!(*f, 4);
f.assign(5i8);
assert_eq!(*f, 5);
f.assign(other);
assert_eq!(*f, 4);
f.assign(6u16);
assert_eq!(*f, 6);
f.assign(-6i16);
assert_eq!(*f, -6);
f.assign(6u32);
assert_eq!(*f, 6);
f.assign(-6i32);
assert_eq!(*f, -6);
f.assign(6u64);
assert_eq!(*f, 6);
f.assign(-6i64);
assert_eq!(*f, -6);
f.assign(6u128);
assert_eq!(*f, 6);
f.assign(-6i128);
assert_eq!(*f, -6);
f.assign(6usize);
assert_eq!(*f, 6);
f.assign(-6isize);
assert_eq!(*f, -6);
f.assign(0u32);
assert_eq!(*f, 0);
f.assign(Special::Infinity);
assert!(f.is_infinite() && f.is_sign_positive());
f.assign(Special::NegZero);
assert!(f.is_zero() && f.is_sign_negative());
f.assign(Special::NegInfinity);
assert!(f.is_infinite() && f.is_sign_negative());
f.assign(Special::Zero);
assert!(f.is_zero() && f.is_sign_positive());
f.assign(Special::Nan);
assert!(f.is_nan());
float::free_cache(FreeCache::All);
}
}