Merge pull request #151 from neurolib-dev/fix/exploration/save_lists_in_results

Support lists as exploration and evolution outputs
caglorithm authored Mar 31, 2021
2 parents 86a2299 + 885cea4 commit 3517cfc
Showing 4 changed files with 32 additions and 17 deletions.
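
In practice, this change lets an exploration's evaluation function return lists and numpy arrays alongside plain scalars and still have them aggregated into `dfResults`. A minimal sketch, closely following the test added in this commit; the import paths and the HDF filename are assumptions based on the usual neurolib package layout and may differ in your version:

import numpy as np
from neurolib.utils.parameterSpace import ParameterSpace
from neurolib.optimize.exploration import BoxSearch

def explore_me(traj):
    pars = search.getParametersFromTraj(traj)
    # distance of the point (x, y) to the unit circle
    distance = abs((pars["x"] ** 2 + pars["y"] ** 2) - 1)
    # scalars, lists, and numpy arrays can all be returned as outputs
    result_dict = {"scalar_result": distance, "list_result": [1, 2, 3, 4], "array_result": np.ones(3)}
    search.saveToPypet(result_dict, traj)

parameters = ParameterSpace({"x": np.linspace(-2, 2, 2), "y": np.linspace(-2, 2, 2)})
search = BoxSearch(evalFunction=explore_me, parameterSpace=parameters, filename="example.hdf")
search.run()
search.loadResults(pypetShortNames=False)
# list and array outputs land in object-dtype columns of the results dataframe
print(search.dfResults[["scalar_result", "list_result", "array_result"]])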
19 changes: 13 additions & 6 deletions neurolib/optimize/evolution/evolution.py
@@ -147,6 +147,7 @@ def __init__(

# -------- settings
self.verbose = False
self.verbose_plotting = True
self.plotColor = "C0"

# -------- simulation
@@ -235,7 +236,7 @@ def __init__(
self.evaluationCounter = 0
self.last_id = 0

def run(self, verbose=False):
def run(self, verbose=False, verbose_plotting=True):
"""Run the evolution or continue previous evolution. If evolution was not initialized first
using `runInitial()`, this will be done.
@@ -244,6 +245,7 @@ def run(self, verbose=False):
"""

self.verbose = verbose
self.verbose_plotting = verbose_plotting
if not self._initialPopulationSimulated:
self.runInitial()

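With the new `verbose_plotting` flag, the progress plots produced during a verbose run can be switched off independently of the text output. A short usage sketch, assuming a hypothetical `Evolution` instance named `evolution`:

evolution.run(verbose=True, verbose_plotting=False)  # log info each generation, but skip the plots
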
@@ -650,7 +652,7 @@ def runEvolution(self):

# verbose output
if self.verbose:
self.info(plot=True, info=True)
self.info(plot=self.verbose_plotting, info=True)

logging.info("--- End of evolution ---")
logging.info("Best individual is %s, %s" % (self.best_ind, self.best_ind.fitness.values))
@@ -710,7 +712,7 @@ def info(self, plot=True, bestN=5, info=True, reverse=False):
print(f"Best {bestN} individuals:")
eu.printIndividuals(self.toolbox.selBest(self.pop, bestN), self.paramInterval)
print("--------------------")
# Plotting
# Plotting evolutionary progress
if plot:
# hack: during the evolution we need to use reverse=True
# after the evolution (with evolution.info()), we need False
@@ -785,22 +787,27 @@ def _outputToDf(self, pop, df):
:return: Dataframe with outputs
:rtype: pandas.core.frame.DataFrame
"""
# defines which variable types will be saved in the results dataframe
SUPPORTED_TYPES = (float, int, np.ndarray, list)
SCALAR_TYPES = (float, int)
ARRAY_TYPES = (np.ndarray, list)

assert len(pop) == len(df), "Dataframe and population do not have same length."
nan_value = np.nan
# load outputs into dataframe
for i, p in enumerate(pop):
if hasattr(p, "outputs"):
for key, value in p.outputs.items():
# only save floats, ints and arrays
if isinstance(value, (float, int, np.ndarray)):
if isinstance(value, SUPPORTED_TYPES):
# save 1-dim arrays
if isinstance(value, np.ndarray):
if isinstance(value, ARRAY_TYPES):
# to save a numpy array, convert column to object type
if key not in df:
df[key] = None
df[key] = df[key].astype(object)
df.at[i, key] = value
elif isinstance(value, (float, int)):
elif isinstance(value, SCALAR_TYPES):
# save numbers
df.loc[i, key] = value
else:
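The object-dtype conversion used in `_outputToDf` (and in the analogous exploration code below) is what allows an entire list or array to be stored in a single dataframe cell. A standalone pandas sketch of the same pattern, with illustrative column names only:

import numpy as np
import pandas as pd

df = pd.DataFrame(index=range(3))
key, value = "list_result", [1, 2, 3, 4]
if key not in df:
    df[key] = None                    # create the column first
df[key] = df[key].astype(object)      # object dtype can hold arbitrary Python objects per cell
df.at[0, key] = value                 # store the whole list in one cell
df.at[1, key] = np.ones(3)            # arrays are stored the same way
print(df)
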
15 changes: 9 additions & 6 deletions neurolib/optimize/exploration/exploration.py
@@ -352,9 +352,12 @@ def aggregateResultsToDfResults(self, arrays=True, fillna=False):
:type fillna: bool, optional
"""
nan_value = np.nan
logging.info("Aggregating results to `dfResults` ...")
# for i, result in tqdm.tqdm(self.results.items()):
# defines which variable types will be saved in the results dataframe
SUPPORTED_TYPES = (float, int, np.ndarray, list)
SCALAR_TYPES = (float, int)
ARRAY_TYPES = (np.ndarray, list)

logging.info("Aggregating results to `dfResults` ...")
for runId, parameters in tqdm.tqdm(self.dfResults.iterrows(), total=len(self.dfResults)):
# if the results were previously loaded into memory, use them
if hasattr(self, "results"):
@@ -370,16 +373,16 @@ def aggregateResultsToDfResults(self, arrays=True, fillna=False):

for key, value in result.items():
# only save floats, ints and arrays
if isinstance(value, (float, int, np.ndarray)):
if isinstance(value, SUPPORTED_TYPES):
# save 1-dim arrays
if isinstance(value, np.ndarray) and arrays:
if isinstance(value, ARRAY_TYPES) and arrays:
# to save a numpy array, convert column to object type
if key not in self.dfResults:
self.dfResults[key] = None
self.dfResults[key] = self.dfResults[key].astype(object)
self.dfResults.at[runId, key] = value
elif isinstance(value, (float, int)):
# save numbers
elif isinstance(value, SCALAR_TYPES):
# save scalars
self.dfResults.loc[runId, key] = value
else:
self.dfResults.loc[runId, key] = nan_value
2 changes: 1 addition & 1 deletion setup.py
@@ -11,7 +11,7 @@

setuptools.setup(
name="neurolib",
version="0.5.13",
version="0.5.14",
description="Easy whole-brain neural mass modeling",
long_description=long_description,
long_description_content_type="text/markdown",
13 changes: 9 additions & 4 deletions tests/test_exploration.py
@@ -9,6 +9,7 @@
import neurolib.utils.pypetUtils as pu
import numpy as np
import xarray as xr

from neurolib.models.aln import ALNModel
from neurolib.models.fhn import FHNModel
from neurolib.models.multimodel import MultiModel
@@ -151,19 +152,23 @@ def explore_me(traj):
pars = search.getParametersFromTraj(traj)
# let's calculate the distance to a circle
computation_result = abs((pars["x"] ** 2 + pars["y"] ** 2) - 1)
result_dict = {"distance": computation_result}
result_dict = {"scalar_result": computation_result, "list_result": [1, 2, 3, 4], "array_result": np.ones(3)}
search.saveToPypet(result_dict, traj)

parameters = ParameterSpace({"x": np.linspace(-2, 2, 2), "y": np.linspace(-2, 2, 2)})
search = BoxSearch(evalFunction=explore_me, parameterSpace=parameters, filename="test_circle_exploration.hdf")
search.run()
search.loadResults(pypetShortNames=False)

for i in search.dfResults.index:
search.dfResults.loc[i, "distance"] = search.results[i]["distance"]

# call the result dataframe
search.dfResults

# test integrity of dataframe
for i in search.dfResults.index:
self.assertEqual(search.dfResults.loc[i, "scalar_result"], search.results[i]["scalar_result"])
self.assertListEqual(search.dfResults.loc[i, "list_result"], search.results[i]["list_result"])
np.testing.assert_array_equal(search.dfResults.loc[i, "array_result"], search.results[i]["array_result"])


class TestExplorationMultiModel(unittest.TestCase):
"""
