diff --git a/CHANGELOG b/CHANGELOG
index 752ba92d..a6e07dad 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -14,8 +14,12 @@ Current head (v.1.3 RC)
 - [all] Limit explicit use of exceptions and adapt to compilation mode (#135)
 - [fitting] Fix MeanNormal and MeanNormalDer classes for mean normal vector derivatives computation (#136)
+- Tests
+    - [fitting] Compare single-pass covariance analysis with the standard two-pass algorithm (#143)
+
 - Docs
     - [spatialPartitioning] Update KdTree docs to reflect the kdtree API refactor (#129)
+    - [fitting] Add equations explaining how single-pass covariance analysis works (#143)
 
 --------------------------------------------------------------------------------
 v.1.2
diff --git a/Ponca/src/Fitting/covarianceFit.h b/Ponca/src/Fitting/covarianceFit.h
index a3fbd229..78881199 100644
--- a/Ponca/src/Fitting/covarianceFit.h
+++ b/Ponca/src/Fitting/covarianceFit.h
@@ -16,14 +16,32 @@ namespace Ponca
 {
 /*!
-    \brief Line fitting procedure that minimize the orthogonal distance
-    between the samples and the fitted primitive.
+    \brief Procedure that computes and decomposes the covariance matrix of the neighbors' positions in \f$3d\f$.
+
+    This process is commonly used for plane fitting and local variance analysis. It is often called Principal
+    Component Analysis (PCA) of the neighborhood, and used in Geometry Processing and Computer Vision.
     \inherit Concept::FittingProcedureConcept
-    \see Line
     \see CovariancePlaneFit which use a similar approach for Plane estimation
-    \todo Add equations
+
+    ### Computation details
+    Standard PCA algorithm involves a two-step process where the barycenter \f$\mathbf{b}\f$ is first computed,
+    and then the covariance matrix \f$\text{C}\f$ (in the following, the weights are ignored for clarity but without
+    loss of generality):
+    \f{align}
+        \mathbf{b} &= \frac{1}{n}\sum_i\mathbf{p}_i \\
+        \text{C} &= \frac{1}{n}\sum_i(\mathbf{p}_i-\mathbf{b})(\mathbf{p}_i-\mathbf{b})^T
+    \f}
+    This class implements a single-pass version, where the first formulation is re-expressed as follows:
+    \f{align}
+        \text{C} &= \frac{1}{n}\sum_i (\mathbf{p}_i\mathbf{p}_i^T - \mathbf{b}\mathbf{p}_i^T - \mathbf{p}_i\mathbf{b}^T + \mathbf{b}\mathbf{b}^T) \\
+                 &= \frac{1}{n}\sum_i (\mathbf{p}_i\mathbf{p}_i^T) - \frac{1}{n}\sum_i(\mathbf{b}\mathbf{p}_i^T) - \frac{1}{n}\sum_i(\mathbf{p}_i\mathbf{b}^T) + \frac{1}{n}\sum_i (\mathbf{b}\mathbf{b}^T) \\
+                 &= \frac{1}{n}\sum_i (\mathbf{p}_i\mathbf{p}_i^T) - \mathbf{b}\frac{1}{n}\sum_i(\mathbf{p}_i^T) - \frac{1}{n}\sum_i(\mathbf{p}_i)\mathbf{b}^T + \frac{1}{n}\sum_i(1) \mathbf{b}\mathbf{b}^T \\
+                 &= \frac{1}{n}\sum_i (\mathbf{p}_i\mathbf{p}_i^T) - \mathbf{b}\mathbf{b}^T - \mathbf{b}\mathbf{b}^T + \mathbf{b}\mathbf{b}^T
+    \f}
+    Leading to a single pass where \f$\text{C}\f$ is expressed by subtracting two terms that can be computed independently
+    in one run:
+    \f[ \text{C} = \frac{1}{n}\sum_i (\mathbf{p}_i\mathbf{p}_i^T) - \mathbf{b}\mathbf{b}^T \f]
+
+    \warning This class is valid only in 3D.
*/ diff --git a/tests/src/CMakeLists.txt b/tests/src/CMakeLists.txt index 5a0c7405..c35c7507 100644 --- a/tests/src/CMakeLists.txt +++ b/tests/src/CMakeLists.txt @@ -14,6 +14,7 @@ add_multi_test(fit_unoriented.cpp) add_multi_test(gls_compare.cpp) add_multi_test(plane_primitive.cpp) add_multi_test(fit_plane.cpp) +add_multi_test(fit_cov.cpp) add_multi_test(fit_line.cpp) add_multi_test(fit_monge_patch.cpp) add_multi_test(basket.cpp) diff --git a/tests/src/fit_cov.cpp b/tests/src/fit_cov.cpp new file mode 100644 index 00000000..50d833d9 --- /dev/null +++ b/tests/src/fit_cov.cpp @@ -0,0 +1,237 @@ +/* + Copyright (C) 2014 Nicolas Mellado + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. +*/ + + +/*! + \file test/src/fit_cov.cpp + \brief Test validity of the covariance fitting procedures wrt to standard algorithm + */ + +#include "../common/testing.h" +#include "../common/testUtils.h" + +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace Ponca; + + +/// Class that perform the covariance fit using standard two-passes procedure +template < class DataPoint, class _WFunctor, typename T> +class CovarianceFitTwoPassesBase : public T +{ + PONCA_FITTING_DECLARE_DEFAULT_TYPES + +protected: + enum + { + Check = Base::PROVIDES_MEAN_POSITION, + PROVIDES_POSITION_COVARIANCE + }; + +public: + using MatrixType = typename DataPoint::MatrixType; /*!< \brief Alias to matrix type*/ + /*! 
\brief Solver used to analyse the covariance matrix*/
+    using Solver = Eigen::SelfAdjointEigenSolver<MatrixType>;
+
+protected:
+    // computation data
+    MatrixType m_cov {MatrixType::Zero()};  /*!< \brief Covariance matrix */
+    Solver m_solver;                        /*!<\brief Solver used to analyse the covariance matrix */
+    bool m_barycenterReady {false};         /*!< \brief Set once the first pass has produced the barycenter */
+    VectorType m_barycenter;                /*!< \brief Barycenter saved at the end of the first pass */
+    Scalar sumW;                            /*!< \brief Sum of the weights accumulated during the second pass */
+
+public:
+    PONCA_EXPLICIT_CAST_OPERATORS(CovarianceFitTwoPassesBase,covarianceFitTwoPasses)
+    PONCA_FITTING_DECLARE_INIT_ADD_FINALIZE
+
+    /*! \brief Reading access to the Solver used to analyse the covariance matrix */
+    PONCA_MULTIARCH inline const Solver& solver() const { return m_solver; }
+};
+
+/*! \brief Reset the internal state: covariance, barycenter and weight sum are cleared */
+template < class DataPoint, class _WFunctor, typename T>
+void
+CovarianceFitTwoPassesBase<DataPoint, _WFunctor, T>::init(
+        const typename CovarianceFitTwoPassesBase<DataPoint, _WFunctor, T>::VectorType& _evalPos)
+{
+    Base::init(_evalPos);
+
+    m_cov.setZero();
+    m_barycenterReady = false;
+    m_barycenter.setZero();
+    sumW = Scalar(0);
+}
+
+/*! \brief First pass: forward to the barycenter accumulator; second pass: accumulate centered covariance */
+template < class DataPoint, class _WFunctor, typename T>
+bool
+CovarianceFitTwoPassesBase<DataPoint, _WFunctor, T>::addLocalNeighbor(Scalar w,
+                                                                      const VectorType &localQ,
+                                                                      const DataPoint &attributes)
+{
+    if( ! m_barycenterReady ) /// first pass
+    {
+        return Base::addLocalNeighbor(w, localQ, attributes);
+    }
+    else {                    /// second pass
+        VectorType q = localQ - m_barycenter; // saved from previous run
+        m_cov += w * q * q.transpose();
+        sumW  += w;
+        return true;
+    }
+    return false; // unreachable: both branches above return
+}
+
+
+template < class DataPoint, class _WFunctor, typename T>
+FIT_RESULT
+CovarianceFitTwoPassesBase<DataPoint, _WFunctor, T>::finalize ()
+{
+    if( !
m_barycenterReady ){ /// end of the first pass
+        auto ret = Base::finalize();
+
+        if(ret == STABLE) { // barycenter is ready: request a second pass over the neighbors
+            m_barycenterReady = true;
+            m_barycenter = Base::barycenter();
+            return NEED_OTHER_PASS;
+        }
+        // handle specific configurations
+        if( ret != STABLE)
+            return Base::m_eCurrentState;
+    }
+    else { /// end of the second pass
+        // Normalize covariance matrix
+        m_cov /= sumW ; /// \warning There is a bug in the pass system that prevents to call Base::getWeightSum();
+
+        m_solver.compute(m_cov);
+        Base::m_eCurrentState = ( m_solver.info() == Eigen::Success ? STABLE : UNDEFINED );
+    }
+
+    return Base::m_eCurrentState;
+}
+
+
+/// Compare the single-pass fit (Fit) with the two-pass reference (FitRef) on a sampled plane
+template<typename DataPoint, typename Fit, typename FitRef, typename WeightFunc> //, typename Fit, typename WeightFunction>
+void testFunction(bool _bUnoriented = false, bool _bAddPositionNoise = false, bool _bAddNormalNoise = false)
+{
+    // Define related structure
+    typedef typename DataPoint::Scalar Scalar;
+    typedef typename DataPoint::VectorType VectorType;
+
+    //generate sampled plane
+    int nbPoints = Eigen::internal::random<int>(100, 1000);
+
+    Scalar width  = Eigen::internal::random<Scalar>(1., 10.);
+    Scalar height = width;
+
+    Scalar analysisScale = Scalar(15.)
* std::sqrt( width * height / nbPoints);
+    Scalar centerScale   = Eigen::internal::random<Scalar>(1,10000);
+    VectorType center    = VectorType::Random() * centerScale;
+
+    VectorType direction = VectorType::Random().normalized();
+
+    Scalar epsilon = testEpsilon<Scalar>();
+    vector<DataPoint> vectorPoints(nbPoints);
+
+    for(unsigned int i = 0; i < vectorPoints.size(); ++i)
+    {
+        vectorPoints[i] = getPointOnPlane<DataPoint>(center,
+                                                     direction,
+                                                     width,
+                                                     _bAddPositionNoise,
+                                                     _bAddNormalNoise,
+                                                     _bUnoriented);
+    }
+
+    epsilon = testEpsilon<Scalar>();
+    if ( _bAddPositionNoise) // relax a bit the testing threshold
+        epsilon = Scalar(0.01*MAX_NOISE);
+
+    // Test for each point if the fitted plane corresponds to the theoretical plane
+#pragma omp parallel for
+    for(int i = 0; i < int(vectorPoints.size()); ++i)
+    {
+        Fit fit;
+        fit.setWeightFunc(WeightFunc(analysisScale));
+        fit.init(vectorPoints[i].pos());
+        auto fitState = fit.compute(vectorPoints);
+
+        FitRef ref;
+        ref.setWeightFunc(WeightFunc(analysisScale));
+        ref.init(vectorPoints[i].pos());
+        auto refState = ref.compute(vectorPoints);
+
+        VERIFY(fitState == refState);
+
+        auto checkVectors = [](const VectorType& x, const VectorType& y){
+            Scalar values = std::min(
+                    (x.array() - y.array()).abs().matrix().squaredNorm(),
+                    (x.array() + y.array()).abs().matrix().squaredNorm()); // deal with sign ambiguity
+            VERIFY( Eigen::internal::isApproxOrLessThan(values, Scalar(1e-5)) );
+        };
+
+        // check we get the same decomposition
+        checkVectors(fit.covarianceFit().solver().eigenvalues(),
+                     ref.covarianceFitTwoPasses().solver().eigenvalues());
+        for (int d = 0; d != 3; ++d)
+            checkVectors(fit.covarianceFit().solver().eigenvectors().col(d),
+                         ref.covarianceFitTwoPasses().solver().eigenvectors().col(d));
+    }
+}
+
+// NOTE(review): template parameter lists below were lost in extraction; reconstructed from
+// usage (PointPositionNormal needs <Scalar, Dim>; Baskets need MeanPosition for the
+// PROVIDES_MEAN_POSITION requirement) — verify against the original commit.
+template<typename Scalar, int Dim>
+void callSubTests()
+{
+    typedef PointPositionNormal<Scalar, Dim> Point;
+
+    typedef DistWeightFunc<Point, SmoothWeightKernel<Scalar> > WeightSmoothFunc;
+    typedef DistWeightFunc<Point, ConstantWeightKernel<Scalar> > WeightConstantFunc;
+
+    typedef Basket<Point, WeightSmoothFunc, MeanPosition, CovarianceFitBase> CovFitSmooth;
+    typedef Basket<Point, WeightConstantFunc, MeanPosition, CovarianceFitBase> CovFitConstant;
+
+    typedef Basket<Point, WeightSmoothFunc, MeanPosition, CovarianceFitTwoPassesBase> RefFitSmooth;
+    typedef Basket<Point, WeightConstantFunc, MeanPosition, CovarianceFitTwoPassesBase> RefFitConstant;
+
+
+    cout << "Testing with data sampling a perfect plane..." << endl;
+    for(int i = 0; i < g_repeat; ++i)
+    {
+        //Test with perfect plane
+        CALL_SUBTEST(( testFunction<Point, CovFitSmooth, RefFitSmooth, WeightSmoothFunc>() ));
+        CALL_SUBTEST(( testFunction<Point, CovFitConstant, RefFitConstant, WeightConstantFunc>() ));
+    }
+    cout << "Ok!" << endl;
+
+    cout << "Testing with noise on position" << endl;
+    for(int i = 0; i < g_repeat; ++i)
+    {
+        CALL_SUBTEST(( testFunction<Point, CovFitSmooth, RefFitSmooth, WeightSmoothFunc>(false, true, true) ));
+        CALL_SUBTEST(( testFunction<Point, CovFitConstant, RefFitConstant, WeightConstantFunc>(false, true, true) ));
+    }
+    cout << "Ok!" << endl;
+}
+
+int main(int argc, char** argv)
+{
+    if(!init_testing(argc, argv))
+    {
+        return EXIT_FAILURE;
+    }
+
+    cout << "Test covariance matrix construction and decomposition for different baskets..." << endl;
+
+    callSubTests<float, 3>();
+    callSubTests<double, 3>();
+    callSubTests<long double, 3>();
+}