Updated moe to work with different models
teubert committed Oct 16, 2023
1 parent 538ce5f commit fb3f5d2
Showing 3 changed files with 178 additions and 17 deletions.
112 changes: 100 additions & 12 deletions src/progpy/mixture_of_experts.py
@@ -20,7 +20,7 @@ class MixtureOfExpertsModel(CompositeModel):
Typically, outputs are provided in the MoE model input when performing a state estimation step (i.e. when there is measured data) but not when predicting forward (i.e. when the output is unknown).
When calling output, event_state, threshold_met, or performance_metrics, only the model with the best score will be called, and those results returned. In case of a tie, the first model (in the order provided by the constructor) of the tied models will be used.
When calling output, event_state, threshold_met, or performance_metrics, only the model with the best score will be called, and those results returned. In case of a tie, the first model (in the order provided by the constructor) of the tied models will be used. If not every output, event, or performance metric has been identified, the next-best model will be used to fill in the blanks, and so on.
Args:
models (list[PrognosticsModel]): List of at least 2 models that form the ensemble
@@ -162,7 +162,7 @@ def next_state(self, x, u, dt):

return x

def best_model(self, x):
def best_model(self, x, _excepting=[]):
"""
Get the best-performing model according to the scores
@@ -175,30 +175,118 @@ def best_model(self, x):
# Identify best model
best_value = -1
for i, (key, _) in enumerate(self.parameters['models']):
if key in _excepting:
continue # Skip models listed in _excepting
score_key = key + DIVIDER + "_score"
if x[score_key] > best_value:
best_value = x[score_key]
best_index = i
return self.parameters['models'][best_index]

def output(self, x):
name, m = self.best_model(x)
excepting = []
outputs_seen = set()
z = {}
while outputs_seen != set(self.outputs):
# Not all outputs have been calculated
name, m = self.best_model(x, _excepting=excepting)
excepting.append(name)

new_outputs = set(m.outputs) - outputs_seen
if len(new_outputs) > 0:
# Has an output that hasn't been seen

# Prepare state
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
z_i = m.output(x_i)

# Merge in new outputs
for key in new_outputs:
z[key] = z_i[key]

# Prepare state
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
return m.output(x_i)
# Add new outputs
outputs_seen |= new_outputs

return self.OutputContainer(z)

def event_state(self, x):
name, m = self.best_model(x)
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
return m.event_state(x_i)
excepting = []
events_seen = set()
es = {}
while events_seen != set(self.events):
# Not all event states have been calculated
name, m = self.best_model(x, _excepting=excepting)
excepting.append(name)

new_events = set(m.events) - events_seen
if len(new_events) > 0:
# Has an event that hasn't been seen

# Prepare state
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
es_i = m.event_state(x_i)

# Merge in new events
for key in new_events:
es[key] = es_i[key]

# Add new events
events_seen |= new_events

return es

def threshold_met(self, x):
name, m = self.best_model(x)
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
return m.threshold_met(x_i)
excepting = []
events_seen = set()
tm = {}
while events_seen != set(self.events):
# Not all events have been evaluated
name, m = self.best_model(x, _excepting=excepting)
excepting.append(name)

new_events = set(m.events) - events_seen
if len(new_events) > 0:
# Has an event that hasn't been seen

# Prepare state
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
tm_i = m.threshold_met(x_i)

# Merge in new events
for key in new_events:
tm[key] = tm_i[key]

# Add new events
events_seen |= new_events

return tm

def performance_metrics(self, x):
excepting = []
performance_metrics_seen = set()
pm = {}
while performance_metrics_seen != set(self.performance_metric_keys):
# Not all performance metrics have been calculated
name, m = self.best_model(x, _excepting=excepting)
excepting.append(name)

new_performance_metrics = set(m.performance_metric_keys) - performance_metrics_seen
if len(new_performance_metrics) > 0:
# Has a performance metric that hasn't been seen

# Prepare state
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
pm_i = m.performance_metrics(x_i)

# Merge in new performance metrics
for key in new_performance_metrics:
pm[key] = pm_i[key]

# Add new performance metrics
performance_metrics_seen |= new_performance_metrics

return pm

name, m = self.best_model(x)
x_i = m.StateContainer({key: x[name + '.' + key] for key in m.states})
return m.performance_metrics(x_i)
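
The fill-in-the-blanks merging used by output, event_state, threshold_met, and performance_metrics above can be illustrated with a minimal standalone sketch. The Best and Backup classes here are hypothetical stand-ins, not progpy models; the merge loop mirrors the while loop in the diff.

class Best:
    outputs = ['x0+b', 'x0+c']
    def output(self):
        return {'x0+b': 5.35, 'x0+c': 5.35}

class Backup:
    outputs = ['x0+d', 'x0+c']
    def output(self):
        return {'x0+d': 3.19, 'x0+c': 3.04}

def merge_outputs(models_by_score, wanted):
    # Walk models best-to-worst, taking only keys not already filled
    merged = {}
    for m in models_by_score:
        new_keys = set(m.outputs) - set(merged)
        if new_keys:
            z = m.output()  # call the model once, copy only the new keys
            for key in new_keys:
                merged[key] = z[key]
        if set(merged) == set(wanted):
            break  # every requested key has been filled
    return merged

merge_outputs([Best(), Backup()], ['x0+b', 'x0+c', 'x0+d'])
# x0+b and x0+c come from Best; only x0+d is filled in from Backup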
42 changes: 42 additions & 0 deletions src/progpy/models/test_models/other_models.py
@@ -40,3 +40,45 @@ def event_state(self, x):

def threshold_met(self, x):
return {'x0==10': x['x0']>=10}

class OneInputTwoOutputsOneEvent_alt(PrognosticsModel):
"""
Simple example model where x0 increases by a * u0. Designed to be slightly different from OneInputTwoOutputsOneEvent
"""
inputs = ['u0']
states = ['x0']
outputs = ['x0+d', 'x0+c']
events = ['x0==10', 'x0==7']

default_parameters = {
'x0': { # Initial State
'x0': 0
},
'a': 1,
'd': 1,
'c': 1
}

def dx(self, x, u):
return self.StateContainer({
'x0': self.parameters['a'] * u['u0']
})

def output(self, x):
return self.OutputContainer({
'x0+d': x['x0'] + self.parameters['d'],
'x0+c': x['x0'] + self.parameters['c']
})

def event_state(self, x):
return {
'x0==10': 1-x['x0']/10,
'x0==7': 1-x['x0']/7
}

def threshold_met(self, x):
return {
'x0==10': x['x0']>=10,
'x0==7': x['x0']>=7
}

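As a usage sketch for the new test model (values mirror the test below; this snippet is illustrative and not part of the module):

from progpy.models.test_models.other_models import OneInputTwoOutputsOneEvent_alt

m = OneInputTwoOutputsOneEvent_alt(a=1.17, d=0.85, c=0.85)
x = m.initialize()
x = m.next_state(x, m.InputContainer({'u0': 2}), 1)  # x0 = 1.17 * 2 * 1 = 2.34
m.output(x)         # x0+d = 2.34 + 0.85 = 3.19, x0+c = 3.19
m.event_state(x)    # x0==10: 1 - 2.34/10, x0==7: 1 - 2.34/7
m.threshold_met(x)  # both False
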
41 changes: 36 additions & 5 deletions tests/test_moe.py
@@ -6,7 +6,7 @@
import unittest

from progpy import MixtureOfExpertsModel
from progpy.models.test_models.other_models import OneInputTwoOutputsOneEvent
from progpy.models.test_models.other_models import OneInputTwoOutputsOneEvent, OneInputTwoOutputsOneEvent_alt


class TestMoE(unittest.TestCase):
@@ -18,10 +18,6 @@ def tearDown(self):
sys.stdout = sys.__stdout__

def testSameModel(self):
DT = 0.25

m_gt = OneInputTwoOutputsOneEvent(a=1.2)

m1 = OneInputTwoOutputsOneEvent(a=2.3, b=0.75, c=0.75)
m2 = OneInputTwoOutputsOneEvent(a=1.19) # best option
m3 = OneInputTwoOutputsOneEvent(a=0.95, b=0.85, c=0.85)
@@ -88,6 +84,41 @@ def testSameModel(self):
self.assertGreater(x['OneInputTwoOutputsOneEvent_3._score'], 0.48*0.8)
self.assertLess(x['OneInputTwoOutputsOneEvent_3._score'], 0.52*0.8)

def test_heterogeneous_models(self):
m1 = OneInputTwoOutputsOneEvent(a=2.3, b=0.75, c=0.75)
m2 = OneInputTwoOutputsOneEvent(a=1.19) # best option
m3 = OneInputTwoOutputsOneEvent_alt(a=1.17, d=0.85, c=0.85) # different class

m_moe = MixtureOfExpertsModel((m1, m2, m3))
self.assertSetEqual(set(m_moe.inputs), set(OneInputTwoOutputsOneEvent.inputs + OneInputTwoOutputsOneEvent.outputs + OneInputTwoOutputsOneEvent_alt.outputs))
self.assertSetEqual(set(m_moe.outputs), set(OneInputTwoOutputsOneEvent.outputs + OneInputTwoOutputsOneEvent_alt.outputs))
self.assertSetEqual(set(m_moe.events), set(OneInputTwoOutputsOneEvent.events + OneInputTwoOutputsOneEvent_alt.events))
self.assertSetEqual(set(m_moe.states), {'OneInputTwoOutputsOneEvent.x0', 'OneInputTwoOutputsOneEvent_2.x0', 'OneInputTwoOutputsOneEvent_alt.x0', 'OneInputTwoOutputsOneEvent._score', 'OneInputTwoOutputsOneEvent_2._score', 'OneInputTwoOutputsOneEvent_alt._score'})

x0 = m_moe.initialize()

# Next_state uses first model (since scores are equal)
x = m_moe.next_state(x0, m_moe.InputContainer({'u0': 2}), 1)
z = m_moe.output(x) # This is where it "chooses one"
# Since scores are equal it should choose the first one
# which means model 1's x0 is 4.6 with b = c = 0.75, while model 3's d is 0.85 (and its x0 is 2.34)
self.assertEqual(z['x0+b'], 5.35)
self.assertEqual(z['x0+c'], 5.35)
self.assertEqual(z['x0+d'], 3.19)

es = m_moe.event_state(x)
self.assertEqual(es['x0==10'], 0.54) # 1 - 4.6/10 (uses x0 from model 1)
self.assertEqual(es['x0==7'], 1-2.34/7) # (uses x0 from model 3)

tm = m_moe.threshold_met(x)
self.assertFalse(tm['x0==10'])
self.assertFalse(tm['x0==7'])

x['OneInputTwoOutputsOneEvent_alt.x0'] = 20 # Will only affect the state for model 3
tm = m_moe.threshold_met(x)
self.assertFalse(tm['x0==10'])
self.assertTrue(tm['x0==7'])
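
For reference, the asserted values follow from simple arithmetic; a standalone check (not part of the test file):

u0, dt = 2, 1
x0_m1 = 2.3 * u0 * dt   # model 1 state after one step: 4.6
x0_m3 = 1.17 * u0 * dt  # model 3 (alt) state after one step: 2.34
assert abs((x0_m1 + 0.75) - 5.35) < 1e-9  # z['x0+b'] and z['x0+c'] from model 1
assert abs((x0_m3 + 0.85) - 3.19) < 1e-9  # z['x0+d'] filled in from model 3
assert abs((1 - x0_m1 / 10) - 0.54) < 1e-9  # es['x0==10'] from model 1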

# This allows the module to be executed directly
def main():
load_test = unittest.TestLoader()
