diff --git a/modules/core/include/JacobianAdjuster.h b/modules/core/include/JacobianAdjuster.h new file mode 100644 index 0000000000..c8eab8d2fc --- /dev/null +++ b/modules/core/include/JacobianAdjuster.h @@ -0,0 +1,161 @@ +/** + * \file IMP/core/JacobianAdjuster.h + * \brief Adjust score and gradients with Jacobian of parameter transformation. + * + * Copyright 2007-2018 IMP Inventors. All rights reserved. + */ + +#ifndef IMPCORE_JACOBIAN_ADJUSTER_H +#define IMPCORE_JACOBIAN_ADJUSTER_H + +#include +#include +#include +#include + +IMPCORE_BEGIN_NAMESPACE + +//! Store Jacobian factors of a univariate transformation +/** For a univariate transformation, \f$y = f(x)\f$, it stores the Jacobian + of the inverse transformation \f$f^{-1}(y)\f$ and the necessary + Jacobian-derived adjustment to the score and its gradient. + + \param[in] jacobian Jacobian value \f$J(y) = \frac{d}{dy}f^{-1}(y)\f$. + \param[in] score_adjustment Adjustment to score due to transformation, + \f$-\log |J(y)|\f$. + \param[in] gradient_adjustment Adjustment to gradient of score due to + transformation, + \f$-\frac{d}{dy} \log |J(y)|\f$. + */ +IMP_NAMED_TUPLE_3(UnivariateJacobian, UnivariateJacobians, Float, jacobian, + Float, score_adjustment, Float, gradient_adjustment,); + +//! Store Jacobian factors of a multivariate transformation +/** For a multivariate transformation, \f$Y = f(X)\f$, it stores the Jacobian + of the inverse transformation \f$f^{-1}(X)\f$ and the necessary + Jacobian-derived adjustment to the score and its gradient. + + \param[in] jacobian Jacobian matrix \f$J(Y)\f$. + \param[in] score_adjustment Adjustment to score due to transformation, + \f$-\log |\det J(y)|\f$. + \param[in] gradient_adjustment Adjustment to gradient of score due to + transformation, + \f$-\nabla_Y \log |\det J(y)|\f$. + */ +IMP_NAMED_TUPLE_3(MultivariateJacobian, MultivariateJacobians, + FloatsList, jacobian, Float, score_adjustment, Floats, + gradient_adjustment,{ + IMP_IF_CHECK(USAGE_AND_INTERNAL) { + int N = gradient_adjustment_.size(); + IMP_USAGE_CHECK(N > 0, "Number of variables must be positive."); + IMP_USAGE_CHECK(jacobian_.size() == N, + "Jacobian number of rows must be equal to gradient adjustment size."); + for (unsigned int n = 0; n < N; ++n) { + IMP_USAGE_CHECK(jacobian_[n].size() == N, "Jacobian must be square."); + } + } +}); + +//! Adjust score and gradients with Jacobian of parameter transformation. +/** It stores UnivariateJacobians and MultivariateJacobians as well as the + particle and float attributes to which they are related. + + It computes the total necessary Jacobian adjustment to the score due to + transformation with get_score_adjustment(). It also adds the necessary + Jacobian gradient modification to all derivatives with + apply_gradient_adjustment(). + + \note This class should not be created directly. Instead, + get_jacobian_adjuster() should be used so that only one is + associated with each module. + */ +class IMPCOREEXPORT JacobianAdjuster : public IMP::ModelObject { + typedef IMP::FloatIndex FloatIndex; + IMP_NAMED_TUPLE_2(FloatsIndex, FloatsIndexes, ParticleIndex, particle, + FloatKeys, keys, ); + typedef std::pair UP; + typedef std::pair MP; + typedef IMP_KERNEL_SMALL_UNORDERED_MAP + UnivariateJacobianMap; + typedef IMP_KERNEL_SMALL_UNORDERED_MAP + MultivariateJacobianMap; + UnivariateJacobianMap uni_map_; + MultivariateJacobianMap multi_map_; + + public: + JacobianAdjuster(Model* m, const std::string name = "JacobianAdjuster%1%"); + + //! 
Set Jacobian for univariate transformation corresponding to attribute. + /** \param[in] ks Attribute of transformed parameter. + \param[in] pi Index of decorated particle. + \param[in] j Container of Jacobian, score adjustment, and gradient + adjustment due to transformation. + */ + void set_jacobian(FloatKey k, ParticleIndex pi, const UnivariateJacobian& j); + + //! Set Jacobian for multivariate transformation corresponding to attribute. + /** \param[in] ks Attributes of transformed parameter. + \param[in] pi Index of decorated particle. + \param[in] j Container of Jacobian matrix, score adjustment, and gradient + adjustment due to transformation. + */ + void set_jacobian(FloatKeys ks, ParticleIndex pi, + const MultivariateJacobian& j); + + //! Get stored Jacobian for univariate transformation. + const UnivariateJacobian& get_jacobian(FloatKey k, ParticleIndex pi) const; + + //! Get stored Jacobian for multivariate transformation. + const MultivariateJacobian& get_jacobian(FloatKeys ks, + ParticleIndex pi) const; + +#ifndef SWIG + UnivariateJacobian& access_jacobian(FloatKey k, ParticleIndex pi); + + MultivariateJacobian& access_jacobian(FloatKeys ks, ParticleIndex pi); +#endif + + //! Get total adjustment to score from stored Jacobians. + /** Only optimized attributes are considered. */ + double get_score_adjustment() const; + + //! Adjust derivatives of particles using stored Jacobians. + /** Only derivatives of optimized attributes are adjusted. */ + void apply_gradient_adjustment(); + + virtual ModelObjectsTemp do_get_inputs() const IMP_OVERRIDE; + + virtual ModelObjectsTemp do_get_outputs() const IMP_OVERRIDE; + + private: + static ModelKey get_score_state_key(); + void create_score_state(); + + IMP_OBJECT_METHODS(JacobianAdjuster); +}; + +IMP_OBJECTS(JacobianAdjuster, JacobianAdjusters); + +//! Get adjuster of Model's Jacobian +/** If one does not yet exist, it is created. */ +IMPCOREEXPORT JacobianAdjuster* get_jacobian_adjuster(Model* m); + + + +#if !defined(IMP_DOXYGEN) && !defined(SWIG) +//! Adjust all transformed parameter gradients with JacobianAdjuster. 
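+/** This score state is created lazily by JacobianAdjuster::create_score_state()
+    when the first Jacobian is registered. Its do_after_evaluate() calls
+    apply_gradient_adjustment() on the model's JacobianAdjuster, so the
+    gradient adjustment is applied automatically after each evaluation and
+    this class normally does not need to be used directly. */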
+class IMPCOREEXPORT JacobianAdjustGradient : public ScoreState { + public: + JacobianAdjustGradient(Model *m) + : ScoreState(m, "JacobianAdjustGradient%1%") {} + virtual void do_before_evaluate() IMP_OVERRIDE; + virtual void do_after_evaluate(DerivativeAccumulator *da) IMP_OVERRIDE; + virtual ModelObjectsTemp do_get_inputs() const IMP_OVERRIDE; + virtual ModelObjectsTemp do_get_outputs() const IMP_OVERRIDE; + IMP_OBJECT_METHODS(JacobianAdjustGradient); +}; +#endif + +IMPCORE_END_NAMESPACE + +#endif /* IMPCORE_JACOBIAN_ADJUSTER_H */ diff --git a/modules/core/pyext/swig.i-in b/modules/core/pyext/swig.i-in index 6521ceabd4..22174a0f63 100644 --- a/modules/core/pyext/swig.i-in +++ b/modules/core/pyext/swig.i-in @@ -198,6 +198,9 @@ IMP_SWIG_OBJECT(IMP::core, IsCollisionPairPredicate, IsCollisionPairPredicates); IMP_SWIG_VALUE(IMP::core, BinormalTerm, BinormalTermList); IMP_SWIG_OBJECT(IMP::core, MultipleBinormalRestraint, MultipleBinormalRestraints); +IMP_SWIG_OBJECT(IMP::core, JacobianAdjuster, JacobianAdjusters); +IMP_SWIG_VALUE(IMP::core, UnivariateJacobian, UnivariateJacobians); +IMP_SWIG_VALUE(IMP::core, MultivariateJacobian, MultivariateJacobians); /* Don't wrap internal classes */ %ignore IMP::core::internal::ChildArrayTraits; @@ -330,6 +333,7 @@ void visit_depth_first(Hierarchy d, HierarchyVisitor *f) %include "IMP/core/MCCGSampler.h" %include "IMP/core/MinimumRestraint.h" %include "IMP/core/Gaussian.h" +%include "IMP/core/JacobianAdjuster.h" %include "IMP/core/PairRestraint.h" %include "IMP/core/SingletonRestraint.h" diff --git a/modules/core/src/JacobianAdjuster.cpp b/modules/core/src/JacobianAdjuster.cpp new file mode 100644 index 0000000000..ec104490a5 --- /dev/null +++ b/modules/core/src/JacobianAdjuster.cpp @@ -0,0 +1,218 @@ +/** + * \file JacobianAdjuster.cpp + * \brief Classes for dealing with transformations of parameters. + * + * Copyright 2007-2018 IMP Inventors. All rights reserved. 
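+ *
+ * A single JacobianAdjuster is shared per Model: it is stored as Model data
+ * by get_jacobian_adjuster(), defined at the bottom of this file.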
+ * + */ + +#include +#include + +IMPCORE_BEGIN_NAMESPACE + +JacobianAdjuster::JacobianAdjuster(Model* m, const std::string name) + : ModelObject(m, name) {} + +void JacobianAdjuster::set_jacobian(FloatKey k, ParticleIndex pi, + const UnivariateJacobian& j) { + IMP_IF_CHECK(USAGE_AND_INTERNAL) { + IMP_USAGE_CHECK(get_model()->get_has_attribute(k, pi), + "Particle does not have Float attribute."); + } + + FloatIndex fi(pi, k); + UnivariateJacobianMap::iterator lb = uni_map_.lower_bound(fi); + if(lb != uni_map_.end() && !(uni_map_.key_comp()(fi, lb->first))) { + lb->second = j; + } else { + uni_map_.insert(lb, UnivariateJacobianMap::value_type(fi, j)); + set_has_dependencies(false); + create_score_state(); + } +} + +void JacobianAdjuster::set_jacobian( + FloatKeys ks, ParticleIndex pi, const MultivariateJacobian& j) { + IMP_IF_CHECK(USAGE_AND_INTERNAL) { + int N = ks.size(); + IMP_USAGE_CHECK( + N > 0, + "Multivariate transformed parameter must at least one key."); + IMP_USAGE_CHECK( + j.get_jacobian().size() == N && j.get_gradient_adjustment().size() == N, + "Jacobian and gradient adjustment dimensions must equal number of keys."); + IMP_FOREACH(FloatKey k, ks) { + IMP_USAGE_CHECK(get_model()->get_has_attribute(k, pi), + "Particle does not have Float attribute."); + } + } + + FloatsIndex fi(pi, ks); + MultivariateJacobianMap::iterator lb = multi_map_.lower_bound(fi); + if(lb != multi_map_.end() && !(multi_map_.key_comp()(fi, lb->first))) { + lb->second = j; + } else { + multi_map_.insert(lb, MultivariateJacobianMap::value_type(fi, j)); + set_has_dependencies(false); + create_score_state(); + } +} + +const UnivariateJacobian& JacobianAdjuster::get_jacobian(FloatKey k, ParticleIndex pi) const { + FloatIndex fi(pi, k); + UnivariateJacobianMap::const_iterator it; + it = uni_map_.find(fi); + IMP_USAGE_CHECK(it != uni_map_.end(), "Jacobian has not been added."); + return it->second; +} + +UnivariateJacobian& JacobianAdjuster::access_jacobian(FloatKey k, ParticleIndex pi) { + FloatIndex fi(pi, k); + UnivariateJacobianMap::iterator it; + it = uni_map_.find(fi); + IMP_USAGE_CHECK(it != uni_map_.end(), "Jacobian has not been added."); + return it->second; +} + +const MultivariateJacobian& JacobianAdjuster::get_jacobian(FloatKeys ks, ParticleIndex pi) const { + FloatsIndex fi(pi, ks); + MultivariateJacobianMap::const_iterator it; + it = multi_map_.find(fi); + IMP_USAGE_CHECK(it != multi_map_.end(), "Jacobian has not been added."); + return it->second; +} + +MultivariateJacobian& JacobianAdjuster::access_jacobian(FloatKeys ks, ParticleIndex pi) { + FloatsIndex fi(pi, ks); + MultivariateJacobianMap::iterator it; + it = multi_map_.find(fi); + IMP_USAGE_CHECK(it != multi_map_.end(), "Jacobian has not been added."); + return it->second; +} + +double JacobianAdjuster::get_score_adjustment() const { + double adj = 0; + Model *m = get_model(); + + FloatIndex fi; + IMP_FOREACH(UP up, uni_map_) { + fi = up.first; + if (m->get_is_optimized(fi.get_key(), fi.get_particle())) { + adj += up.second.get_score_adjustment(); + } + } + + FloatsIndex fsi; + IMP_FOREACH(MP mp, multi_map_) { + fsi = mp.first; + if (m->get_is_optimized(fsi.get_keys()[0], fsi.get_particle())) { + adj += mp.second.get_score_adjustment(); + } + } + + return adj; +} + +void JacobianAdjuster::apply_gradient_adjustment() { + Model *m = get_model(); + DerivativeAccumulator da = DerivativeAccumulator(); + + FloatIndex fi; + IMP_FOREACH(UP up, uni_map_) { + fi = up.first; + if (m->get_is_optimized(fi.get_key(), fi.get_particle())) { + 
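+      // Accumulate the stored gradient adjustment, -d/dy log|J(y)|, onto the
+      // derivative of the optimized attribute.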
m->add_to_derivative(fi.get_key(), fi.get_particle(), + up.second.get_gradient_adjustment(), da); + } + } + + FloatsIndex fsi; + ParticleIndex pi; + Floats grad_adj; + FloatKeys ks; + unsigned int n, N; + IMP_FOREACH(MP mp, multi_map_) { + fsi = mp.first; + FloatKeys ks = fsi.get_keys(); + pi = fsi.get_particle(); + if (m->get_is_optimized(ks[0], pi)) { + grad_adj = mp.second.get_gradient_adjustment(); + N = ks.size(); + IMP_INTERNAL_CHECK( + grad_adj.size() == N, + "Size of gradient doesn't match number of attributes."); + for (n = 0; n < N; ++n) { + m->add_to_derivative(ks[n], pi, grad_adj[n], da); + } + } + } +} + +ModelKey JacobianAdjuster::get_score_state_key() { + static ModelKey k("jacobian adjust gradient score state"); + return k; +} + +void JacobianAdjuster::create_score_state() { + ModelKey k = get_score_state_key(); + if (!get_model()->get_has_data(k)) { + IMP_NEW(JacobianAdjustGradient, ss, (get_model())); + get_model()->add_data(k, ss); + get_model()->add_score_state(ss.release()); + } else { + IMP::Pointer ss = + IMP::object_cast(get_model()->get_data(k)); + ss->set_has_dependencies(false); + } +} + +ModelObjectsTemp JacobianAdjuster::do_get_inputs() const { + ModelObjectsTemp ret; + Model *m = get_model(); + + ret.reserve(uni_map_.size() + multi_map_.size()); + + IMP_FOREACH(UP up, uni_map_) { + ret.push_back(m->get_particle(up.first.get_particle())); + } + + IMP_FOREACH(MP mp, multi_map_) { + ret.push_back(m->get_particle(mp.first.get_particle())); + } + + return ret; +} + +ModelObjectsTemp JacobianAdjuster::do_get_outputs() const { + return get_inputs(); +} + +JacobianAdjuster *get_jacobian_adjuster(Model *m) { + static ModelKey k("jacobian adjuster"); + if (!m->get_has_data(k)) { + IMP_NEW(JacobianAdjuster, ja, (m)); + m->add_data(k, ja); + return ja.release(); + } else { + IMP::Pointer ja = + IMP::object_cast(m->get_data(k)); + return ja.release(); + } +} + +void JacobianAdjustGradient::do_before_evaluate() {} + +void JacobianAdjustGradient::do_after_evaluate(DerivativeAccumulator *da) { + get_jacobian_adjuster(get_model())->apply_gradient_adjustment(); +} + +ModelObjectsTemp JacobianAdjustGradient::do_get_inputs() const { + return get_jacobian_adjuster(get_model())->get_inputs(); +} + +ModelObjectsTemp JacobianAdjustGradient::do_get_outputs() const { + return get_jacobian_adjuster(get_model())->get_outputs(); +} + +IMPCORE_END_NAMESPACE diff --git a/modules/core/test/test_jacobian_adjustment.py b/modules/core/test/test_jacobian_adjustment.py new file mode 100644 index 0000000000..abe35a42d8 --- /dev/null +++ b/modules/core/test/test_jacobian_adjustment.py @@ -0,0 +1,376 @@ +from __future__ import print_function +import numpy as np +import IMP +import IMP.core +import IMP.test + + +class DummyRestraint(IMP.Restraint): + + """Dummy do-nothing restraint""" + + def __init__(self, m, p, k, name="DummyRestraint %1%"): + IMP.Restraint.__init__(self, m, name) + self.p = p + self.k = k + + def unprotected_evaluate(self, accum): + if accum: + self.p.add_to_derivative(self.k, 0., accum) + return 0. 
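+        # The restraint itself contributes neither score nor derivative, so any
+        # nonzero derivative observed in the tests below comes solely from the
+        # Jacobian gradient adjustment applied by the score state.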
+ + def do_get_inputs(self): + return [self.p] + + +class Tests(IMP.test.TestCase): + + """Tests for classes for Jacobian adjustments to score and its gradient.""" + + @staticmethod + def get_random_multivariate_jacobian_factors(N): + J = np.random.normal(size=(N, N)) + score_adj = np.random.normal() + grad_adj = np.random.normal(size=N) + return J, score_adj, grad_adj + + def test_create_univariate_jacobian(self): + vs = np.random.normal(size=3) + j = IMP.core.UnivariateJacobian(*vs) + self.assertAlmostEqual(j.get_jacobian(), vs[0], delta=1e-6) + self.assertAlmostEqual(j.get_score_adjustment(), vs[1], delta=1e-6) + self.assertAlmostEqual(j.get_gradient_adjustment(), vs[2], delta=1e-6) + + def test_get_set_univariate_jacobian(self): + j = IMP.core.UnivariateJacobian(1, 0, 0) + vs = np.random.normal(size=3) + j.set_jacobian(vs[0]) + self.assertAlmostEqual(j.get_jacobian(), vs[0], delta=1e-6) + j.set_score_adjustment(vs[1]) + self.assertAlmostEqual(j.get_score_adjustment(), vs[1], delta=1e-6) + j.set_gradient_adjustment(vs[2]) + self.assertAlmostEqual(j.get_gradient_adjustment(), vs[2], delta=1e-6) + + def test_create_multivariate_jacobian(self): + N = 2 + J, score_adj, grad_adj = self.get_random_multivariate_jacobian_factors( + N + ) + j = IMP.core.MultivariateJacobian(J, score_adj, grad_adj) + + np.testing.assert_allclose(j.get_jacobian(), J, atol=1e-6) + self.assertAlmostEqual(j.get_score_adjustment(), score_adj, delta=1e-6) + np.testing.assert_allclose( + j.get_gradient_adjustment(), grad_adj, atol=1e-6 + ) + + def test_multivariate_jacobian_create_with_incompatible_shapes_raises_error( + self + ): + N = 2 + J, score_adj, _ = self.get_random_multivariate_jacobian_factors(N) + grad_adj = np.random.normal(size=N + 1) + self.assertRaisesUsageException( + IMP.core.MultivariateJacobian, J, score_adj, grad_adj + ) + + J = np.random.normal(size=(N, N + 1)) + grad_adj = np.random.normal(size=N) + self.assertRaisesUsageException( + IMP.core.MultivariateJacobian, J, score_adj, grad_adj + ) + + J = np.random.normal(size=(N, N)) + IMP.core.MultivariateJacobian(J, score_adj, grad_adj) + + def test_get_set_multivariate_jacobian(self): + N = 2 + J = np.random.normal(size=(N, N)) + score_adj = np.random.normal() + grad_adj = np.random.normal(size=N) + j = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + + J, score_adj, grad_adj = self.get_random_multivariate_jacobian_factors( + N + ) + j.set_jacobian(J) + j.set_score_adjustment(score_adj) + j.set_gradient_adjustment(grad_adj) + + np.testing.assert_allclose(j.get_jacobian(), J, atol=1e-6) + self.assertAlmostEqual(j.get_score_adjustment(), score_adj, delta=1e-6) + np.testing.assert_allclose( + j.get_gradient_adjustment(), grad_adj, atol=1e-6 + ) + + def test_get_jacobian_adjuster(self): + m = IMP.Model() + ja1 = IMP.core.get_jacobian_adjuster(m) + ja2 = IMP.core.get_jacobian_adjuster(m) + self.assertEqual(ja1, ja2) + + def test_univariate_jacobian_roundtrip(self): + m = IMP.Model() + p = IMP.Particle(m) + k = IMP.FloatKey("dummy") + p.add_attribute(k, 10.) 
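+        # The Float attribute must exist on the particle before a Jacobian can
+        # be registered for it; set_jacobian raises a usage exception otherwise.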
+ vs = np.random.normal(size=3) + j1 = IMP.core.UnivariateJacobian(*vs) + ja = IMP.core.get_jacobian_adjuster(m) + ja.set_jacobian(k, p.get_index(), j1) + j2 = ja.get_jacobian(k, p) + self.assertEqual(j1, j2) + + def test_set_univariate_jacobian_without_attribute_raises_usage_error(self): + m = IMP.Model() + p = IMP.Particle(m) + k = IMP.FloatKey("dummy") + vs = np.random.normal(size=3) + j = IMP.core.UnivariateJacobian(*vs) + ja = IMP.core.get_jacobian_adjuster(m) + self.assertRaisesUsageException(ja.set_jacobian, k, p.get_index(), j) + + def test_univariate_get_inputs(self): + m = IMP.Model() + p = IMP.Particle(m) + k = IMP.FloatKey("dummy") + p.add_attribute(k, 10.) + j = IMP.core.UnivariateJacobian(1, 0, 0) + ja = IMP.core.get_jacobian_adjuster(m) + ja.set_jacobian(k, p.get_index(), j) + self.assertListEqual(ja.get_inputs(), [p]) + + def test_univariate_get_outputs(self): + m = IMP.Model() + p = IMP.Particle(m) + k = IMP.FloatKey("dummy") + p.add_attribute(k, 10.) + j = IMP.core.UnivariateJacobian(1, 0, 0) + ja = IMP.core.get_jacobian_adjuster(m) + ja.set_jacobian(k, p.get_index(), j) + self.assertListEqual(ja.get_outputs(), [p]) + + def test_univariate_get_score_adjustment(self): + m = IMP.Model() + ps = [IMP.Particle(m) for i in range(2)] + vs = [np.random.normal(size=3) for _ in ps] + ja = IMP.core.get_jacobian_adjuster(m) + for p, v in zip(ps, vs): + k = IMP.FloatKey("dummy") + p.add_attribute(k, 10.) + j = IMP.core.UnivariateJacobian(*v) + ja.set_jacobian(k, p.get_index(), j) + + self.assertAlmostEqual(ja.get_score_adjustment(), 0, delta=1e-6) + + ps[0].set_is_optimized(k, True) + self.assertAlmostEqual(ja.get_score_adjustment(), vs[0][1], delta=1e-6) + + def test_univariate_apply_gradient_adjustment(self): + m = IMP.Model() + p = IMP.Particle(m) + k = IMP.FloatKey("dummy") + p.add_attribute(k, 10.) + vs = np.random.normal(size=3) + j = IMP.core.UnivariateJacobian(*vs) + ja = IMP.core.get_jacobian_adjuster(m) + ja.set_jacobian(k, p.get_index(), j) + + self.assertAlmostEqual(p.get_derivative(k), 0., delta=1e-6) + + ja.apply_gradient_adjustment() + self.assertAlmostEqual(p.get_derivative(k), 0., delta=1e-6) + + p.set_is_optimized(k, True) + ja.apply_gradient_adjustment() + self.assertAlmostEqual(p.get_derivative(k), vs[2], delta=1e-6) + + def test_univariate_score_state_applies_gradient_adjustment(self): + m = IMP.Model() + ps = [IMP.Particle(m) for i in range(2)] + vs = [np.random.normal(size=3) for _ in ps] + ja = IMP.core.get_jacobian_adjuster(m) + rs = [] + for p, v in zip(ps, vs): + k = IMP.FloatKey("dummy") + p.add_attribute(k, 10.) + j = IMP.core.UnivariateJacobian(*v) + ja.set_jacobian(k, p.get_index(), j) + r = DummyRestraint(m, p, k) + rs.append(r) + + sf = IMP.core.RestraintsScoringFunction(rs) + + self.assertAlmostEqual(ps[0].get_derivative(k), 0, delta=1e-6) + self.assertAlmostEqual(ps[1].get_derivative(k), 0, delta=1e-6) + + sf.evaluate(True) + self.assertAlmostEqual(ps[0].get_derivative(k), 0, delta=1e-6) + self.assertAlmostEqual(ps[1].get_derivative(k), 0, delta=1e-6) + + ps[0].set_is_optimized(k, True) + sf.evaluate(True) + self.assertAlmostEqual(ps[0].get_derivative(k), vs[0][2], delta=1e-6) + self.assertAlmostEqual(ps[1].get_derivative(k), 0, delta=1e-6) + + def test_multivariate_jacobian_roundtrip(self): + m = IMP.Model() + p = IMP.Particle(m) + N = 2 + ks = [IMP.FloatKey("dummy" + str(i)) for i in range(N)] + for k in ks: + p.add_attribute(k, 1.) 
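+        # A multivariate Jacobian is registered against the full list of keys,
+        # so every attribute is added to the particle first.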
+ j1 = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + ja = IMP.core.get_jacobian_adjuster(m) + ja.set_jacobian(ks, p.get_index(), j1) + j2 = ja.get_jacobian(ks, p) + self.assertEqual(j1, j2) + + def test_set_multivariate_jacobian_without_attribute_raises_usage_error( + self + ): + m = IMP.Model() + p = IMP.Particle(m) + N = 2 + ks = [IMP.FloatKey("dummy" + str(i)) for i in range(N)] + j = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + ja = IMP.core.get_jacobian_adjuster(m) + self.assertRaisesUsageException(ja.set_jacobian, ks, p.get_index(), j) + + def test_multivariate_get_inputs(self): + m = IMP.Model() + p = IMP.Particle(m) + N = 2 + ks = [IMP.FloatKey("dummy" + str(i)) for i in range(N)] + for k in ks: + p.add_attribute(k, 1.) + j = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + ja = IMP.core.get_jacobian_adjuster(m) + ja.set_jacobian(ks, p.get_index(), j) + self.assertListEqual(ja.get_inputs(), [p]) + + def test_multivariate_get_outputs(self): + m = IMP.Model() + p = IMP.Particle(m) + N = 2 + ks = [IMP.FloatKey("dummy" + str(i)) for i in range(N)] + for k in ks: + p.add_attribute(k, 1.) + j = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + ja = IMP.core.get_jacobian_adjuster(m) + ja.set_jacobian(ks, p.get_index(), j) + self.assertListEqual(ja.get_outputs(), [p]) + + def test_multivariate_get_score_adjustment(self): + m = IMP.Model() + ps = [IMP.Particle(m) for i in range(2)] + N = 2 + ja = IMP.core.get_jacobian_adjuster(m) + ks = [IMP.FloatKey("dummy" + str(i)) for i in range(N)] + score_adj = [] + for p in ps: + for k in ks: + p.add_attribute(k, 1.) + p.set_is_optimized(k, False) + j = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + ja.set_jacobian(ks, p.get_index(), j) + score_adj.append(j.get_score_adjustment()) + + self.assertAlmostEqual(ja.get_score_adjustment(), 0, delta=1e-6) + + for k in ks: + ps[0].set_is_optimized(k, True) + self.assertAlmostEqual( + ja.get_score_adjustment(), score_adj[0], delta=1e-6 + ) + + def test_multivariate_apply_gradient_adjustment(self): + m = IMP.Model() + ps = [IMP.Particle(m) for i in range(2)] + N = 2 + ja = IMP.core.get_jacobian_adjuster(m) + ks = [IMP.FloatKey("dummy" + str(i)) for i in range(N)] + grad_adj = [] + for p in ps: + for k in ks: + p.add_attribute(k, 1.) 
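+                # Attributes start non-optimized, so the adjuster should leave
+                # all derivatives untouched until they are switched on below.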
+ p.set_is_optimized(k, False) + j = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + ja.set_jacobian(ks, p.get_index(), j) + grad_adj.append(j.get_gradient_adjustment()) + + for k in ks: + self.assertAlmostEqual(ps[0].get_derivative(k), 0., delta=1e-6) + self.assertAlmostEqual(ps[1].get_derivative(k), 0., delta=1e-6) + + ja.apply_gradient_adjustment() + for k in ks: + self.assertAlmostEqual(ps[0].get_derivative(k), 0., delta=1e-6) + self.assertAlmostEqual(ps[1].get_derivative(k), 0., delta=1e-6) + + for k in ks: + ps[0].set_is_optimized(k, True) + ja.apply_gradient_adjustment() + for i, k in enumerate(ks): + self.assertAlmostEqual( + ps[0].get_derivative(k), grad_adj[0][i], delta=1e-6 + ) + self.assertAlmostEqual(ps[1].get_derivative(k), 0., delta=1e-6) + + def test_multivariate_score_state_applies_gradient_adjustment(self): + m = IMP.Model() + ps = [IMP.Particle(m) for i in range(2)] + N = 2 + ja = IMP.core.get_jacobian_adjuster(m) + ks = [IMP.FloatKey("dummy" + str(i)) for i in range(N)] + grad_adj = [] + rs = [] + for p in ps: + for k in ks: + p.add_attribute(k, 1.) + p.set_is_optimized(k, False) + r = DummyRestraint(m, p, k) + rs.append(r) + j = IMP.core.MultivariateJacobian( + *self.get_random_multivariate_jacobian_factors(N) + ) + ja.set_jacobian(ks, p.get_index(), j) + grad_adj.append(j.get_gradient_adjustment()) + + sf = IMP.core.RestraintsScoringFunction(rs) + + for k in ks: + self.assertAlmostEqual(ps[0].get_derivative(k), 0, delta=1e-6) + self.assertAlmostEqual(ps[1].get_derivative(k), 0, delta=1e-6) + + sf.evaluate(True) + for k in ks: + self.assertAlmostEqual(ps[0].get_derivative(k), 0, delta=1e-6) + self.assertAlmostEqual(ps[1].get_derivative(k), 0, delta=1e-6) + + for k in ks: + ps[0].set_is_optimized(k, True) + sf.evaluate(True) + for i, k in enumerate(ks): + self.assertAlmostEqual( + ps[0].get_derivative(k), grad_adj[0][i], delta=1e-6 + ) + self.assertAlmostEqual(ps[1].get_derivative(k), 0, delta=1e-6) + + +if __name__ == "__main__": + IMP.test.main()
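
A minimal usage sketch (not part of the patch), assuming the API behaves as defined
above. It registers the Jacobian factors for a log-transformed parameter
y = log(x): the inverse transform is x = exp(y), so J(y) = exp(y), the score
adjustment is -log|J(y)| = -y, and the gradient adjustment is -d/dy log|J(y)| = -1.
The key name "log_scale" and the value of x are arbitrary choices for illustration.

    import math
    import IMP
    import IMP.core

    m = IMP.Model()
    p = IMP.Particle(m)
    k = IMP.FloatKey("log_scale")

    x = 4.0
    y = math.log(x)          # parameter is stored and optimized in log space
    p.add_attribute(k, y)
    p.set_is_optimized(k, True)

    # Jacobian of the inverse transform, plus the score and gradient adjustments
    # derived from it, as documented in JacobianAdjuster.h.
    j = IMP.core.UnivariateJacobian(math.exp(y), -y, -1.0)

    ja = IMP.core.get_jacobian_adjuster(m)
    ja.set_jacobian(k, p.get_index(), j)

    # For this single optimized attribute, get_score_adjustment() reports -y,
    # and the JacobianAdjustGradient score state adds -1 to the derivative of k
    # after each scoring-function evaluation.
    print(ja.get_score_adjustment())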