
Commit

maybe we just ignore the complexities of Normalisation for now..
ickk committed Sep 12, 2023
1 parent 66ed53d commit 60154f4
Showing 4 changed files with 96 additions and 51 deletions.
1 change: 0 additions & 1 deletion ega/Cargo.toml
@@ -11,4 +11,3 @@ libm = { version = "0.2.0", optional = true }
# feature.
default = ["std"]
std = []
# libm = ["num-traits/libm"]
6 changes: 3 additions & 3 deletions ega/src/operators/mod.rs
@@ -1,4 +1,5 @@
mod add;
mod conjugate;
mod dot;
mod geometric_product;
mod grade_select;
@@ -8,10 +9,9 @@ mod meet;
mod mul;
mod neg;
mod norm;
mod normalise;
mod reverse;
mod conjugate;
mod sub;
mod normalise;

pub use add::Add;
pub use conjugate::Conjugate;
@@ -23,7 +23,7 @@ pub use join::Join;
pub use meet::Meet;
pub use mul::Mul;
pub use neg::Neg;
pub use norm::{Norm, IdealNorm};
pub use norm::Norm;
pub use normalise::Normalise;
pub use reverse::Reverse;
pub use sub::Sub;
101 changes: 73 additions & 28 deletions ega/src/operators/norm.rs
@@ -17,28 +17,28 @@ impl<T: NormSquared> Norm for T {
}
}

/// The Ideal norm, ||A||_inf
///
/// aka "Infinity Norm" or "Vanishing Norm"
pub trait IdealNorm {
/// The Ideal norm, ||A||_inf
fn ideal_norm(self) -> Scalar;
}
// /// The Ideal norm, ||A||_inf
// ///
// /// aka "Infinity Norm" or "Vanishing Norm"
// pub trait IdealNorm {
// /// The Ideal norm, ||A||_inf
// fn ideal_norm(self) -> Scalar;
// }

// needs sqrt function, so relies on std or libm
#[cfg(any(feature = "std", feature = "libm"))]
impl<T, O> IdealNorm for T
where
T: HodgeDual<Output = O>,
O: NormSquared,
{
#[inline]
fn ideal_norm(self) -> Scalar {
Scalar {
s: self.hodge_dual().norm_squared().s.abs().sqrt(),
}
}
}
// // needs sqrt function, so relies on std or libm
// #[cfg(any(feature = "std", feature = "libm"))]
// impl<T, O> IdealNorm for T
// where
// T: HodgeDual<Output = O>,
// O: NormSquared,
// {
// #[inline]
// fn ideal_norm(self) -> Scalar {
// Scalar {
// s: self.hodge_dual().norm_squared().s.abs().sqrt(),
// }
// }
// }

/// The squared norm, ||A||^2
pub trait NormSquared {
@@ -48,14 +48,59 @@ pub trait NormSquared {
fn norm_squared(self) -> Scalar;
}

impl<T, O> NormSquared for T
where
T: Copy + Reverse + Conjugate + GeometricProduct<T, Output = O>,
O: GradeSelect + Reverse,
{
#[inline]
// impl<T, O> NormSquared for T
// where
// // T: Copy + Reverse + Conjugate + GeometricProduct<T, Output = O>,
// T: Copy + Reverse + Dot<T, Output = O>,
// O: GradeSelect + Reverse + std::fmt::Debug,
// {
// #[inline]
// fn norm_squared(self) -> Scalar {
// // let norm_r2 = self.geometric_product(self.reverse());
// let norm_r2_inner = self.dot(self.reverse());

// // dbg!(&norm_r2);
// dbg!(&norm_r2_inner);

// // norm_r2.grade_0()
// norm_r2_inner.grade_0()
// }
// }

impl NormSquared for Scalar {
fn norm_squared(mut self) -> Scalar {
self.s *= self.s;
self
}
}

impl NormSquared for Vector {
fn norm_squared(self) -> Scalar {
let mut out = Scalar { s: 0. };

out.s += self.e1*self.e1;
out.s += self.e2*self.e2;
out.s += self.e3*self.e3;

out
}
}

impl NormSquared for Bivector {
fn norm_squared(self) -> Scalar {
let mut out = Scalar { s: 0. };

out.s += self.e12*self.e12;
out.s += self.e31*self.e31;
out.s += self.e23*self.e23;

out
}
}

impl NormSquared for Trivector {
fn norm_squared(self) -> Scalar {
self.geometric_product(self.reverse()).grade_0()
Scalar { s: self.e123*self.e123 }
}
}
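
As a sanity check of the grade-specific impls above: the squared norm of a vector is just the sum of squares of its e1/e2/e3 components, and the blanket impl<T: NormSquared> Norm for T takes the square root of that. The following is a minimal standalone sketch of the same pattern; the names mirror this diff, but it is illustrative only, not the crate's exact API:

// Illustrative stand-ins for the crate's Scalar/Vector; the real field sets may differ.
#[derive(Debug, Clone, Copy)]
struct Scalar { s: f32 }

#[derive(Debug, Clone, Copy)]
struct Vector { e1: f32, e2: f32, e3: f32 }

trait NormSquared { fn norm_squared(self) -> Scalar; }
trait Norm { fn norm(self) -> Scalar; }

// Per-grade squared norm: sum of squares of the Euclidean components.
impl NormSquared for Vector {
  fn norm_squared(self) -> Scalar {
    Scalar { s: self.e1 * self.e1 + self.e2 * self.e2 + self.e3 * self.e3 }
  }
}

// Blanket impl, as in norm.rs: ||A|| = sqrt(||A||^2) (needs sqrt, hence std or libm).
impl<T: NormSquared> Norm for T {
  fn norm(self) -> Scalar {
    Scalar { s: self.norm_squared().s.abs().sqrt() }
  }
}

fn main() {
  let v = Vector { e1: 3.0, e2: 0.0, e3: 4.0 };
  assert_eq!(v.norm_squared().s, 25.0);
  assert_eq!(v.norm().s, 5.0);
}

Any ideal (e0...) components are deliberately absent from these sums; that missing part of the magnitude is what the now commented-out IdealNorm was meant to capture.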

39 changes: 20 additions & 19 deletions ega/src/operators/normalise.rs
@@ -8,13 +8,14 @@ pub trait Normalise {

impl<T> Normalise for T
where
T: Copy + Norm + IdealNorm + Mul<f32, Output = T> + Add<T, Output = T>
T: Copy + Norm + Mul<f32, Output = T> + Add<T, Output = T>,
{
#[inline]
fn normalise(self) -> Self {
let ideal_norm = self.ideal_norm();
// let ideal_norm = self.ideal_norm();
let norm = self.norm();
dbg!(&ideal_norm, &norm);
// dbg!(&ideal_norm, &norm);
dbg!(&norm);
self * (1. / norm.s)
}
}
@@ -56,22 +57,22 @@ mod tests {
assert_eq!(dbg!(result), dbg!(expected));
}

#[test]
fn normalise_bivector() {
let result = BIVECTOR_A.normalise();
let expected = Bivector {
e01: -0.0029964384,
e02: 0.0015937436,
e03: 0.0013381047,
e12: 0.5841171741,
e31: 0.5790156722,
e23: 0.5688127875,
};
dbg!(result, expected);
let result_squared = result.dot(result);
dbg!(result_squared);
assert_eq!(dbg!(result), dbg!(expected));
}
// #[test]
// fn normalise_bivector() {
// let result = BIVECTOR_A.normalise();
// let expected = Bivector {
// e01: -0.0029964384,
// e02: 0.0015937436,
// e03: 0.0013381047,
// e12: 0.5841171741,
// e31: 0.5790156722,
// e23: 0.5688127875,
// };
// dbg!(result, expected);
// let result_squared = result.geometric_product(result);
// dbg!(result_squared);
// assert_eq!(dbg!(result), dbg!(expected));
// }

#[test]
fn normalise_trivector() {
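
The Normalise impl above reduces to multiplying by the reciprocal of the Euclidean norm. A minimal standalone sketch of just that step, using a bare [f32; 3] stand-in rather than the crate's types:

fn norm(v: [f32; 3]) -> f32 {
  (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt()
}

// Mirrors `self * (1. / norm.s)` from normalise.rs.
fn normalise(v: [f32; 3]) -> [f32; 3] {
  let s = 1.0 / norm(v);
  [v[0] * s, v[1] * s, v[2] * s]
}

fn main() {
  let v = normalise([3.0, 0.0, 4.0]);
  assert!((norm(v) - 1.0).abs() < 1e-6); // unit length after normalising
  println!("{:?}", v); // [0.6, 0.0, 0.8]
}

Incidentally, the e12/e31/e23 components of the expected value in the commented-out bivector test square-sum to roughly 1, which is consistent with scaling by the Euclidean norm alone; the leftover e01/e02/e03 (ideal) part is presumably the "complexity" the commit message defers.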
