diff --git a/examples/01_Simulation.ipynb b/examples/01_Simulation.ipynb
index dbbc75f..1905c71 100644
--- a/examples/01_Simulation.ipynb
+++ b/examples/01_Simulation.ipynb
@@ -328,6 +328,49 @@
     "For most users running this in Jupyter notebook, the output will be truncated, but it gives an idea of what would be shown when selecting these options."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In this example we specified `events='impact'` to indicate that simulation should stop when the specified event 'impact' is met. By default, the simulation will stop when the first of the specified events occurs. If you don't specify any events, all model events will be included (in this case `['falling', 'impact']`). This means that without specifying events, execution would have ended early, when the object starts falling, as below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1)\n",
+    "print('Last timestep: ', results.times[-1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that simulation stopped at around 3.8 seconds, about when the object starts falling.\n",
+    "\n",
+    "Alternatively, if we would like to execute until all events have occurred, we can use the `event_strategy` argument, as below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "results = m.simulate_to_threshold(save_freq=0.5, dt=0.1, event_strategy='all')\n",
+    "print('Last timestep: ', results.times[-1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the simulation stopped at around 7.9 seconds, when the last of the events ('impact') occurred."
+   ]
+  },
  {
   "cell_type": "markdown",
   "metadata": {},
diff --git a/src/progpy/predictors/monte_carlo.py b/src/progpy/predictors/monte_carlo.py
index f2f8a95..96900c4 100644
--- a/src/progpy/predictors/monte_carlo.py
+++ b/src/progpy/predictors/monte_carlo.py
@@ -28,7 +28,8 @@ class MonteCarlo(Predictor):
     __DEFAULT_N_SAMPLES = 100 # Default number of samples to use, if none specified and not UncertainData

     default_parameters = {
-        'n_samples': None
+        'n_samples': None,
+        'event_strategy': 'all'
     }

     def predict(self, state: UncertainData, future_loading_eqn: Callable=None, events=None, **kwargs) -> PredictionResults:
@@ -50,6 +51,10 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable=None, event
            Simulation step size (s), e.g., 0.1
        events : list[str], optional
            Events to predict (subset of model.events) e.g., ['event1', 'event2']
+       event_strategy : str, optional
+           Strategy for stopping evaluation. Default is 'all'. One of:\n
+           'first': Will stop when the first event in the `events` list is reached.
+           'all': Will stop when all events in the `events` list have been reached.
        horizon : float, optional
            Prediction horizon (s)
        n_samples : int, optional
@@ -84,6 +89,8 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable=None, event
        params.update(kwargs)  # update for specific run
        params['print'] = False
        params['progress'] = False
+       # Remove event_strategy from params so it is not passed through to the simulate_to* call
+       event_strategy = params.pop('event_strategy')

        if not isinstance(state, UnweightedSamples) and params['n_samples'] is None:
            # if not unweighted samples, some sample number is required, so set to default.
@@ -94,10 +101,20 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable=None, event
        if events is None:
            # Predict to all events
            # change to list because of limits of jsonify
-           events = list(self.model.events)
+           if 'events' in params and params['events'] is not None:
+               # Events were set at a predictor level (e.g., in construction)
+               events = list(params['events'])
+           else:
+               # Otherwise, all model events
+               events = list(self.model.events)

        if len(events) == 0 and 'horizon' not in params:
            raise ValueError("If specifying no event (i.e., simulate to time), must specify horizon")

+       if 'events' in params:
+           # 'events' was provided as an argument in construction
+           # Remove it so it's not passed to simulate_to*
+           del params['events']
+
        # Sample from state if n_samples specified or state is not UnweightedSamples (Case 2)
        # Or if is Unweighted samples, but there are the wrong number of samples (Case 1)
        if (
@@ -184,7 +201,12 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable=None, event

                    # An event has occured
                    time_of_event[event] = times[-1]
-                   events_remaining.remove(event)  # No longer an event to predict to
+                   if event_strategy == 'all':
+                       events_remaining.remove(event)  # No longer an event to predict to
+                   elif event_strategy in ('first', 'any'):
+                       events_remaining = []
+                   else:
+                       raise ValueError(f"Invalid value for `event_strategy`: {event_strategy}. Should be either 'all' or 'first'")

                    # Remove last state (event)
                    params['t0'] = times.pop()
diff --git a/src/progpy/predictors/unscented_transform.py b/src/progpy/predictors/unscented_transform.py
index e8e3012..312b95b 100644
--- a/src/progpy/predictors/unscented_transform.py
+++ b/src/progpy/predictors/unscented_transform.py
@@ -89,6 +89,7 @@ class UnscentedTransformPredictor(Predictor):
        'kappa': -1,
        't0': 0,
        'dt': 0.5,
+       'event_strategy': 'all',
        'horizon': 1e99,
        'save_pts': [],
        'save_freq': 1e99
@@ -175,6 +176,9 @@ def predict(self, state, future_loading_eqn: Callable = None, events=None, **kwa
        params = deepcopy(self.parameters)  # copy parameters
        params.update(kwargs)  # update for specific run

+       if params['event_strategy'] != 'all':
+           raise ValueError(f"`event_strategy` {params['event_strategy']} not supported. Currently, only the 'all' event strategy is supported")
+
        if events is None:
            # Predict to all events
            # change to list because of limits of jsonify
@@ -252,7 +256,7 @@ def update_all():
                    all_failed = False  # This event for this sigma point hasn't been met yet
            if all_failed:
                # If all events have been reched for every sigma point
-               break
+               break

    # Prepare Results
    pts = array([[e for e in ToE[key]] for key in ToE.keys()])
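Reviewer note: a quick way to exercise the predictor-side behavior above. This is a minimal sketch, not part of the diff; it assumes progpy's `ThrownObject` demo model and the import paths `progpy.models.ThrownObject` / `progpy.predictors.MonteCarlo`, which are not shown here.

```python
from progpy.models import ThrownObject
from progpy.predictors import MonteCarlo

m = ThrownObject()
mc = MonteCarlo(m)

# Default strategy is 'all': both events get a predicted time
results = mc.predict(m.initialize(), dt=0.2, save_freq=1)
print(results.time_of_event.mean['falling'], results.time_of_event.mean['impact'])

# 'first': each sample stops at its first event, so only 'falling' is predicted
results = mc.predict(m.initialize(), dt=0.2, save_freq=1, event_strategy='first')
print('impact' in results.time_of_event.mean)  # expected: False
```

`UnscentedTransformPredictor` accepts the same parameter but, per the guard added above, currently supports only `'all'` and raises a `ValueError` for anything else.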
diff --git a/src/progpy/prognostics_model.py b/src/progpy/prognostics_model.py
index f4b27e5..2b40d61 100644
--- a/src/progpy/prognostics_model.py
+++ b/src/progpy/prognostics_model.py
@@ -783,6 +783,13 @@ def simulate_to_threshold(self, future_loading_eqn: abc.Callable = None, first_o

        Keyword Arguments
        -----------------
+       events : abc.Sequence[str] or str, optional
+           Keys for events that will trigger the end of simulation.
+           If not specified, simulation will stop when any model event is met.
+       event_strategy : str, optional
+           Strategy for stopping evaluation. Default is 'first'. One of:\n
+           'first': Will stop when the first event in the `events` list is reached.
+           'all': Will stop when all events in the `events` list have been reached.
        t0 : float, optional
            Starting time for simulation in seconds (default: 0.0) \n
        dt : float, tuple, str, or function, optional
@@ -802,9 +809,6 @@ def simulate_to_threshold(self, future_loading_eqn: abc.Callable = None, first_o
            maximum time that the model will be simulated forward (s), e.g., horizon = 1000 \n
        first_output : OutputContainer, optional
            First measured output, needed to initialize state for some classes. Can be omitted for classes that don't use this
-       events: abc.Sequence[str] or str, optional
-           Keys for events that will trigger the end of simulation.
-           If blank, simulation will occur if any event will be met ()
        x : StateContainer, optional
            initial state, e.g., x= m.StateContainer({'x1': 10, 'x2': -5.3})\n
        thresholds_met_eqn : abc.Callable, optional
@@ -880,6 +884,7 @@ def simulate_to_threshold(self, future_loading_eqn: abc.Callable = None, first_o
            't0': 0.0,
            'dt': ('auto', 1.0),
            'eval_pts': [],
+           'event_strategy': 'first',
            'save_pts': [],
            'save_freq': 10.0,
            'horizon': 1e100,  # Default horizon (in s), essentially inf
@@ -936,11 +941,21 @@ def simulate_to_threshold(self, future_loading_eqn: abc.Callable = None, first_o
        progress = config['progress']

        # Threshold Met Equations
-       def check_thresholds(thresholds_met):
-           t_met = [thresholds_met[key] for key in events]
-           if len(t_met) > 0 and not np.isscalar(list(t_met)[0]):
-               return np.any(t_met)
-           return any(t_met)
+       if config['event_strategy'] in ('first', 'any'):
+           def check_thresholds(thresholds_met):
+               t_met = [thresholds_met[key] for key in events]
+               if len(t_met) > 0 and not np.isscalar(list(t_met)[0]):
+                   return np.any(t_met)
+               return any(t_met)
+       elif config['event_strategy'] == 'all':
+           def check_thresholds(thresholds_met):
+               t_met = [thresholds_met[key] for key in events]
+               if len(t_met) > 0 and not np.isscalar(list(t_met)[0]):
+                   return np.all(t_met)
+               return all(t_met)
+       else:
+           raise ValueError(f"Invalid value for `event_strategy`: {config['event_strategy']}. Should be either 'all' or 'first'")
+
        if 'thresholds_met_eqn' in config:
            check_thresholds = config['thresholds_met_eqn']
            events = []
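On the simulation side, the new `event_strategy` argument mirrors the notebook cells earlier in this diff. A minimal sketch (same `ThrownObject` import assumption as above):

```python
from progpy.models import ThrownObject

m = ThrownObject()

# Default ('first'): stops at the first event reached ('falling', around 3.8 s)
results = m.simulate_to_threshold(save_freq=0.5, dt=0.1)
print('Last timestep:', results.times[-1])

# 'all': runs until every event has been reached ('impact' is last, around 7.9 s)
results = m.simulate_to_threshold(save_freq=0.5, dt=0.1, event_strategy='all')
print('Last timestep:', results.times[-1])
```

Note that the dispatch above also accepts `'any'` as a synonym for `'first'`, although the docstring and error message only advertise `'first'` and `'all'`.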
diff --git a/tests/test_base_models.py b/tests/test_base_models.py
index 75c344d..47a70f2 100644
--- a/tests/test_base_models.py
+++ b/tests/test_base_models.py
@@ -604,6 +604,18 @@ def load(t, x=None):
        (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(load, {'o1': 0.8}, dt=0.5, save_freq=1.0, events=['e1', 'e2'])
        self.assertAlmostEqual(times[-1], 5.0, 5)

+       # Any event, manual - specified strategy
+       (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(load, {'o1': 0.8}, dt=0.5, save_freq=1.0, events=['e1', 'e2'], event_strategy='first')
+       self.assertAlmostEqual(times[-1], 5.0, 5)
+
+       # Both e1 and e2
+       (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(load, {'o1': 0.8}, dt=0.5, save_freq=1.0, events=['e1', 'e2'], event_strategy='all')
+       self.assertAlmostEqual(times[-1], 15.0, 5)
+
+       # Any event, manual - unexpected strategy
+       with self.assertRaises(ValueError):
+           (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(load, {'o1': 0.8}, dt=0.5, save_freq=1.0, events=['e1', 'e2'], event_strategy='fljsdk')
+
        # Only event 2
        (times, inputs, states, outputs, event_states) = m.simulate_to_threshold(load, {'o1': 0.8}, dt=0.5, save_freq=1.0, events=['e2'])
        self.assertAlmostEqual(times[-1], 15.0, 5)
diff --git a/tests/test_predictors.py b/tests/test_predictors.py
index c38f20f..6b6e05d 100644
--- a/tests/test_predictors.py
+++ b/tests/test_predictors.py
@@ -62,6 +62,15 @@ def test_pred_template(self):
        m = MockProgModel()
        pred = TemplatePredictor(m)

+   def test_UTP_Broken(self):
+       m = ThrownObject()
+       pred = UnscentedTransformPredictor(m)
+       samples = MultivariateNormalDist(['x', 'v'], [1.83, 40], [[0.1, 0.01], [0.01, 0.1]])
+
+       with self.assertRaises(ValueError):
+           # Invalid event strategy - 'first' (not supported)
+           pred.predict(samples, dt=0.2, num_samples=3, save_freq=1, event_strategy='first')
+
    def test_UTP_ThrownObject(self):
        m = ThrownObject()
        pred = UnscentedTransformPredictor(m)
@@ -122,7 +131,15 @@ def future_loading(t, x=None):
        s = results.time_of_event.sample(100).key('EOD')
        samples.eol_metrics(s)  # Kept for backwards compatibility

-   def test_MC(self):
+   def test_MC_Broken(self):
+       m = ThrownObject()
+       mc = MonteCarlo(m)
+
+       with self.assertRaises(ValueError):
+           # Invalid event strategy
+           mc.predict(m.initialize(), dt=0.2, num_samples=3, save_freq=1, event_strategy='fdksl')
+
+   def test_MC_ThrownObject(self):
        m = ThrownObject()
        mc = MonteCarlo(m)
@@ -131,6 +148,22 @@ def test_MC(self):
        self.assertAlmostEqual(results.time_of_event.mean['impact'], 8.0, 5)
        self.assertAlmostEqual(results.time_of_event.mean['falling'], 3.8, 5)

+       # event_strategy='all' should act the same
+       results = mc.predict(m.initialize(), dt=0.2, num_samples=3, save_freq=1, event_strategy='all')
+       self.assertAlmostEqual(results.time_of_event.mean['impact'], 8.0, 5)
+       self.assertAlmostEqual(results.time_of_event.mean['falling'], 3.8, 5)
+
+   def test_MC_ThrownObject_First(self):
+       # Test thrown object, similar to test_UTP_ThrownObject, but with only the first event (i.e., 'falling')
+
+       m = ThrownObject()
+       mc = MonteCarlo(m)
+       mc_results = mc.predict(m.initialize(), dt=0.2, event_strategy='first', num_samples=3, save_freq=1)
+
+       self.assertAlmostEqual(mc_results.time_of_event.mean['falling'], 3.8, 10)
+       self.assertTrue('impact' not in mc_results.time_of_event.mean)
+       self.assertAlmostEqual(mc_results.times[-1], 3, 1)  # Saving every second, the last time should be around the nearest 1 s before the falling event
+
    def test_prediction_mvnormaldist(self):
        times = list(range(10))
        covar = [[0.1, 0.01], [0.01, 0.1]]
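Finally, the error path covered by the new tests can be reproduced directly. Sketch only; `'sideways'` is just a deliberately invalid value, and the `ThrownObject` import path is the same assumption as in the earlier sketches:

```python
from progpy.models import ThrownObject

m = ThrownObject()
try:
    m.simulate_to_threshold(save_freq=0.5, dt=0.1, event_strategy='sideways')
except ValueError as e:
    # Expected: Invalid value for `event_strategy`: sideways. Should be either 'all' or 'first'
    print(e)
```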