Merge pull request #25 from DarkMachines/day1
pybambi beta
williamjameshandley authored Feb 8, 2019
2 parents 051be5d + b065aee commit f7f9c65
Showing 13 changed files with 258 additions and 96 deletions.
6 changes: 3 additions & 3 deletions docs/source/pybambi.neuralnetworks.rst
@@ -20,10 +20,10 @@ pybambi.neuralnetworks.kerasnet module
:undoc-members:
:show-inheritance:

-pybambi.neuralnetworks.nearestneighbor module
----------------------------------------------
+pybambi.neuralnetworks.nearestneighbour module
+----------------------------------------------

-.. automodule:: pybambi.neuralnetworks.nearestneighbor
+.. automodule:: pybambi.neuralnetworks.nearestneighbour
:members:
:undoc-members:
:show-inheritance:
6 changes: 3 additions & 3 deletions docs/source/pybambi.rst
@@ -19,10 +19,10 @@ pybambi.bambi module
:undoc-members:
:show-inheritance:

-pybambi.dumper module
----------------------
+pybambi.manager module
+----------------------

-.. automodule:: pybambi.dumper
+.. automodule:: pybambi.manager
:members:
:undoc-members:
:show-inheritance:
34 changes: 29 additions & 5 deletions pybambi/bambi.py
@@ -4,7 +4,7 @@
Date: November 2018
"""
import os
-from pybambi.dumper import dumper
+from pybambi.manager import BambiManager


def run_pyBAMBI(loglikelihood, prior, nDims, **kwargs):
@@ -32,27 +32,51 @@ def run_pyBAMBI(loglikelihood, prior, nDims, **kwargs):
efficiency for multinest.
Default `0.5**nDims`
    learner: object
        Which learning algorithm to use for approximating the likelihood.
        Can be the string `'keras'` or `'nearestneighbour'`, or a
        `keras.models.Model`.
        Default `'keras'`
    ntrain: int
        Number of training points to use.
        Default `nlive`
    proxy_tolerance: float
        Required accuracy of proxy.
        Default `0.1`
    failure_tolerance: float
        Fraction of failed proxy evaluations tolerated before the proxy
        is abandoned and retrained.
        Default `0.5`
    ns_output: int
        Nested sampling output level.
"""
# Process kwargs
nested_sampler = kwargs.pop('nested_sampler', 'polychord')
nlive = kwargs.pop('nlive', nDims*25)
root = kwargs.pop('root', os.path.join('chains', nested_sampler))
num_repeats = kwargs.pop('num_repeats', nDims*5)
eff = kwargs.pop('eff', 0.5**nDims)
learner = kwargs.pop('learner', 'keras')
proxy_tolerance = kwargs.pop('proxy_tolerance', 0.1)
failure_tolerance = kwargs.pop('failure_tolerance', 0.5)
ntrain = kwargs.pop('ntrain', nlive)

if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)

# Set up the global manager of the BAMBI session.
thumper = BambiManager(loglikelihood, learner, proxy_tolerance,
failure_tolerance, ntrain)

# Choose and run sampler
if nested_sampler == 'polychord':
from pybambi.polychord import run_polychord
-        run_polychord(loglikelihood, prior, dumper, nDims,
-                      nlive, root, num_repeats)
+        run_polychord(thumper.loglikelihood, prior, thumper.dumper, nDims,
+                      nlive, root, ntrain//2, num_repeats)

elif nested_sampler == 'multinest':
from pybambi.multinest import run_multinest
-        run_multinest(loglikelihood, prior, dumper, nDims,
-                      nlive, root, eff)
+        run_multinest(thumper.loglikelihood, prior, thumper.dumper, nDims,
+                      nlive, root, ntrain//2, eff)

else:
        raise NotImplementedError('nested sampler %s is not implemented'
                                  % nested_sampler)
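
For orientation, a minimal driver for the interface above might look as
follows. The Gaussian likelihood and uniform prior are illustrative
stand-ins, not part of the commit, and running it assumes the chosen
nested sampler (PolyChord here) and keras are installed:

import numpy as np
from pybambi.bambi import run_pyBAMBI

def loglikelihood(theta):
    # Illustrative spherical Gaussian log-likelihood
    return -0.5 * np.sum(theta**2)

def prior(cube):
    # Map the unit hypercube to a uniform prior on [-5, 5]
    return 10 * cube - 5

run_pyBAMBI(loglikelihood, prior, nDims=4,
            nested_sampler='polychord', learner='keras',
            proxy_tolerance=0.1, failure_tolerance=0.5)
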
25 changes: 0 additions & 25 deletions pybambi/dumper.py

This file was deleted.

98 changes: 98 additions & 0 deletions pybambi/manager.py
@@ -0,0 +1,98 @@
"""BAMBI management object.
Author: Pat Scott (p.scott@imperial.ac.uk)
Date: Feb 2019
"""

import numpy as np

from pybambi.neuralnetworks.kerasnet import KerasNetInterpolation
from pybambi.neuralnetworks.nearestneighbour \
import NearestNeighbourInterpolation
import keras.models


class BambiManager(object):
"""Does all the talking for BAMBI.
Takes a new set of training data from the dumper and trains (or retrains) a
neural net, and assesses whether or not it can be used for a given
parameter combination.
Parameters
----------
ntrain: int
Number of training points to use
"""

def __init__(self, loglikelihood, learner, proxy_tolerance,
failure_tolerance, ntrain):
"""Construct bambi object."""
self.proxy_tolerance = proxy_tolerance
self._loglikelihood = loglikelihood
self._learner = learner
self._proxy_tolerance = proxy_tolerance
self._failure_tolerance = failure_tolerance
self._ntrain = ntrain
self._proxy_trained = False
self.old_learners = []

def make_learner(self, params, loglikes):
"""Construct a Predictor."""
if self._learner == 'keras':
return KerasNetInterpolation(params, loglikes)
elif self._learner == 'nearestneighbour':
return NearestNeighbourInterpolation(params, loglikes)
elif issubclass(type(self._learner), keras.models.Model):
return KerasNetInterpolation(params, loglikes, model=self._learner)
else:
raise NotImplementedError('learner %s is not implemented.'
% self._learner)

def dumper(self, live_params, live_loglks, dead_params, dead_loglks):
"""Respond to signal from nested sampler."""
if not self._proxy_trained:
params = np.concatenate((live_params, dead_params))
loglikes = np.concatenate((live_loglks, dead_loglks))
self.train_new_learner(params[:self._ntrain, :],
loglikes[:self._ntrain])
if self._proxy_trained:
print("Using trained proxy")
else:
print("Unable to use proxy")

def loglikelihood(self, params):
"""Bambi Proxy wrapper for original loglikelihood."""
# Short circuit to the full likelihood if proxy not yet fully trained
if not self._proxy_trained:
return self._loglikelihood(params)

# Call the learner
candidate_loglikelihood = self._current_learner(params)

# If the learner can be trusted, use its estimate,
# otherwise use the original like and update the failure status
if self._current_learner.valid(candidate_loglikelihood):
return candidate_loglikelihood
else:
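            # A proxy failure nudges the rolling failure fraction towards
            # 1 by (1 - f)/ntrain: a smoothed failure counter with memory
            # of roughly the last ntrain calls; valid proxy calls return
            # early above and leave it unchanged.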
self._rolling_failure_fraction = (1.0 + (self._ntrain - 1.0) *
self._rolling_failure_fraction
) / self._ntrain
if self._rolling_failure_fraction > self._failure_tolerance:
self._proxy_trained = False
return self._loglikelihood(params)

def train_new_learner(self, params, loglikes):
"""Train a new Predictor."""
try:
self.old_learners.append(self._current_learner)
except AttributeError:
pass
self._current_learner = self.make_learner(params, loglikes)
sigma = self._current_learner.uncertainty()
print("Current uncertainty in network log-likelihood predictions: %s"
% sigma)
if sigma < self._proxy_tolerance:
self._proxy_trained = True
self._rolling_failure_fraction = 0.0
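
As a sketch of how the manager dispatches learners (the toy data and lambda
likelihood are invented for illustration; keras must be importable, since
pybambi.manager imports it at module level):

import numpy as np
from pybambi.manager import BambiManager

# Toy training set: 100 points in 3 dimensions
params = np.random.rand(100, 3)
loglikes = -0.5 * np.sum(params**2, axis=1)

manager = BambiManager(loglikelihood=lambda t: -0.5 * np.sum(t**2),
                       learner='nearestneighbour', proxy_tolerance=0.1,
                       failure_tolerance=0.5, ntrain=100)

# A string selects a built-in interpolator; a keras.models.Model instance
# would be wrapped in a KerasNetInterpolation instead.
learner = manager.make_learner(params, loglikes)
print(learner.uncertainty())
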
18 changes: 13 additions & 5 deletions pybambi/multinest.py
@@ -6,7 +6,8 @@
from numpy.ctypeslib import as_array


-def run_multinest(loglikelihood, prior, dumper, nDims, nlive, root, eff):
+def run_multinest(loglikelihood, prior, dumper, nDims, nlive, root, ndump,
+                  eff):
"""Run MultiNest.
See https://arxiv.org/abs/0809.3437 for more detail
@@ -46,24 +47,31 @@ def run_multinest(loglikelihood, prior, dumper, nDims, nlive, root, eff):
root: str
base name for output files
ndump: int
How many iterations between dumper function calls
eff: float
Efficiency of MultiNest
"""
import pymultinest

def multinest_prior(cube, ndim, nparams):
-        return prior(as_array(cube, shape=(nparams,)))
+        theta = prior(as_array(cube, shape=(nparams,)))
+        for i, elem in enumerate(theta):
+            cube[i] = elem

def multinest_loglikelihood(cube, ndim, nparams):
return loglikelihood(as_array(cube, shape=(nparams,)))

def multinest_dumper(nSamples, nlive, nPar,
physLive, posterior, paramConstr,
maxLogLike, logZ, logZerr, nullcontext):
-        dumper(physLive)
+        dumper(physLive[:, :-1], physLive[:, -1],
+               posterior[:, :-2], posterior[:, -2])

pymultinest.run(multinest_loglikelihood, multinest_prior, nDims,
resume=False, verbose=True, dump_callback=multinest_dumper,
-                    n_iter_before_update=nlive//10, n_live_points=nlive,
-                    outputfiles_basename=root, sampling_efficiency=eff)
+                    n_iter_before_update=ndump//10, n_live_points=nlive,
+                    outputfiles_basename=root, sampling_efficiency=eff,
+                    evidence_tolerance=0.01)
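
Two conventions are at work in these wrappers: PyMultiNest expects the prior
callback to write the transformed parameters back into `cube` in place (hence
the explicit copy loop), and the dumper receives `physLive` with the
log-likelihood in its trailing column and `posterior` with log-likelihood and
posterior weight in its last two columns, which the slices above separate out.
A toy illustration of that slicing, with invented numbers (the column layout
is the MultiNest convention assumed by this code):

import numpy as np

# Stand-in for physLive: nlive=4 live points, nPar=2 parameters,
# log-likelihood in the trailing column
physLive = np.array([[0.1, 0.2, -1.0],
                     [0.3, 0.4, -2.0],
                     [0.5, 0.6, -0.5],
                     [0.7, 0.8, -1.5]])

live_params = physLive[:, :-1]   # shape (4, 2): parameter values
live_loglks = physLive[:, -1]    # shape (4,):  log-likelihood values
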
40 changes: 38 additions & 2 deletions pybambi/neuralnetworks/base.py
@@ -15,7 +15,7 @@ class Predictor(object):
Parameters
----------
params:
-    `numpy.array of` physical parameters to train on
+    `numpy.array` of physical parameters to train on
shape (ntrain, ndims)
logL:
@@ -24,17 +24,30 @@
"""

-    def __init__(self, params, logL):
+    def __init__(self, params, logL, split=0.8):
"""Construct predictor from training data."""
params = numpy.array(params)
logL = numpy.array(logL)

if len(params) != len(logL):
raise ValueError("input and target must be the same length")
elif params.ndim != 2:
raise ValueError("input must be two-dimensional")
elif logL.ndim != 1:
raise ValueError("target must be one-dimensional")

nparams = len(params)
randomize = numpy.random.permutation(nparams)
params = params[randomize]
logL = logL[randomize]

self._maxLogL = numpy.max(logL)
self._minLogL = numpy.min(logL)
ntrain = int(split*nparams)
indx = [ntrain]
self.params_training, self.params_testing = numpy.split(params, indx)
self.logL_training, self.logL_testing = numpy.split(logL, indx)

def __call__(self, x):
"""Calculate proxy loglikelihood.
@@ -50,3 +63,26 @@ def __call__(self, x):
"""
err = "Predictor: You need to implement a call function"
raise NotImplementedError(err)

def uncertainty(self):
"""Uncertainty value for the trained model."""
err = "Predictor: You need to implement an uncertainty function"
raise NotImplementedError(err)

def valid(self, loglikelihood):
"""Check validity of proxy.
Checks to see if the supplied log likelihood value is within the
current range of likelihoods, including the uncertainty
Parameters
----------
loglikelihood:
Value of the log likelihood that needs checking
"""
inRange = True
if loglikelihood > self._maxLogL + self.uncertainty() \
or loglikelihood < self._minLogL - self.uncertainty():
inRange = False
return inRange
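
Concrete learners only need to supply `__call__` and `uncertainty`; the base
class handles validation, shuffling, and the train/test split (80/20 by
default). A deliberately trivial subclass, invented here purely to show the
contract:

import numpy

from pybambi.neuralnetworks.base import Predictor


class MeanPredictor(Predictor):
    """Toy learner that predicts the mean training log-likelihood."""

    def __call__(self, x):
        # A real learner would interpolate at x; this one ignores it
        return numpy.mean(self.logL_training)

    def uncertainty(self):
        # RMS error on the held-out split made by Predictor.__init__
        pred = numpy.mean(self.logL_training)
        return numpy.sqrt(numpy.mean((self.logL_testing - pred)**2))


# Usage on toy data:
params = numpy.random.rand(50, 2)
logL = -numpy.random.rand(50)
proxy = MeanPredictor(params, logL)
print(proxy(params[0]), proxy.uncertainty(), proxy.valid(-0.5))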