Commit
merge integration to validation
b4pm-devops committed Nov 7, 2024
2 parents 0ed1831 + f2577e4 commit 7ded948
Showing 10 changed files with 185 additions and 22 deletions.
4 changes: 4 additions & 0 deletions sostrades_core/datasets/dataset_info/dataset_info_factory.py
@@ -17,6 +17,7 @@
import logging
import re
from enum import Enum
from warnings import warn

from sostrades_core.datasets.dataset_info.dataset_info_v0 import DatasetInfoV0
from sostrades_core.datasets.dataset_info.dataset_info_v1 import DatasetInfoV1
@@ -61,5 +62,8 @@ def get_dataset_info_version(cls, dataset_mapping_key: str) -> DatasetInfoSerial
        version = DatasetInfoSerializerVersion.V0
        if match:
            version = DatasetInfoSerializerVersion.get_enum_value(match.group(1))
        else:
            warn("A missing version in the dataset info is tolerated for now but will be deprecated in future versions", UserWarning)  # noqa: B028

        return version
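A note on what this factory parses: the mapping keys carry an optional version prefix, as in `v0|<study_ph>|x` from the test mapping later in this commit. A minimal standalone sketch of the lookup — the pattern and helper names here are illustrative, not the module's actual API:

```python
import re
from warnings import warn

# hypothetical pattern; the real regex lives in dataset_info_factory.py
VERSION_PATTERN = re.compile(r"^(V\d+)\|")

def get_version(dataset_mapping_key: str) -> str:
    """Extract the serializer version from a mapping key such as 'v0|<study_ph>|x'."""
    match = VERSION_PATTERN.match(dataset_mapping_key.upper())
    if match:
        return match.group(1)
    # unversioned keys default to V0, with a deprecation warning as in the commit above
    warn("No version prefix in dataset info key; defaulting to V0", UserWarning)
    return "V0"
```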
5 changes: 4 additions & 1 deletion sostrades_core/datasets/dataset_mapping.py
@@ -247,7 +247,10 @@ def get_datasets_namespace_mapping_for_study(self, study_name: str, namespaces_d
        dataset_info_list = {}
        for namespace in namespaces_dict.keys():
            study_namespace = namespace.replace(self.STUDY_PLACEHOLDER, study_name)
-           dataset_info_list.update({dataset_id: {study_namespace: mapping_data} for dataset_id, mapping_data in self.get_datasets_info_from_namespace(namespace, study_name).items()})
+           for dataset, mapping_data in self.get_datasets_info_from_namespace(namespace, study_name).items():
+               dataset_info_list[dataset] = dataset_info_list.get(dataset, {})
+               dataset_info_list[dataset][study_namespace] = dataset_info_list[dataset].get(study_namespace, {})
+               dataset_info_list[dataset][study_namespace].update(mapping_data)

for dataset, namespaces_mapping_dict in dataset_info_list.items():
try:
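The removed dict comprehension replaced a dataset's entire entry whenever the same dataset id reappeared under a second namespace; the new loop merges namespace by namespace. A standalone sketch of the fixed behaviour (toy data, hypothetical helper name):

```python
def merge_namespace_mapping(accumulator: dict, dataset: str, namespace: str, mapping: dict) -> None:
    # accumulate {dataset: {namespace: mapping}} without clobbering earlier namespaces
    accumulator.setdefault(dataset, {}).setdefault(namespace, {}).update(mapping)

acc = {}
merge_namespace_mapping(acc, "ds1", "study.Disc1", {"a": "conn|ds1|a"})
merge_namespace_mapping(acc, "ds1", "study.Disc2", {"b": "conn|ds1|b"})
assert acc == {"ds1": {"study.Disc1": {"a": "conn|ds1|a"},
                       "study.Disc2": {"b": "conn|ds1|b"}}}
# the old version, acc.update({"ds1": {ns: m}}), would have kept only the last namespace
```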
@@ -25,6 +25,7 @@ class AbstractDatasetsSerializer(abc.ABC):
Abstract class to inherit in order to build specific datasets connector
"""
__logger = logging.getLogger(__name__)
SOSTRADES_TYPES = {'string', 'int', 'float', 'bool', 'list', 'dict', 'dataframe', 'array'}

@abc.abstractmethod
def convert_from_dataset_data(self, data_name: str, data_value: Any, data_types_dict: dict[str:str]) -> Any:
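This class-level set centralizes the type whitelist that the concrete serializers check before converting; a trivial sketch of the membership gate:

```python
SOSTRADES_TYPES = {'string', 'int', 'float', 'bool', 'list', 'dict', 'dataframe', 'array'}

def is_supported_type(data_type: str) -> bool:
    # O(1) set membership keeps the whitelist in one place
    # instead of repeating literal lists in every convert method
    return data_type in SOSTRADES_TYPES

assert is_supported_type('dataframe')
assert not is_supported_type('complex')
```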
@@ -322,7 +322,11 @@ def __serialize_into_filesystem(self, serialization_function: Callable[[str, Any

def _deserialize_dataframe(self, data_value: str, data_name: str = None) -> pd.DataFrame:
# NB: dataframe csv deserialization as in webapi
-        return self.__deserialize_from_filesystem(_load_dataframe, data_value)
+        try:
+            return self.__deserialize_from_filesystem(_load_dataframe, data_value)
+        except Exception as error:
+            self.__logger.warning(f"Error while trying to convert data {data_name} with value {data_value} into the type dataframe: {error}")
+            return pd.DataFrame()

def _deserialize_array(self, data_value: str) -> np.ndarray:
# NB: to be improved with astype(subtype) along subtype management
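The guard added here is a plain load-or-fallback pattern; a self-contained sketch with pandas (hypothetical helper, not the serializer's private API):

```python
import logging

import pandas as pd

logger = logging.getLogger(__name__)

def load_dataframe_safely(csv_path: str, data_name: str = "") -> pd.DataFrame:
    """Return the CSV as a DataFrame, or an empty DataFrame if deserialization fails."""
    try:
        return pd.read_csv(csv_path)
    except Exception as error:
        # mirror the commit: warn and degrade gracefully instead of aborting the whole load
        logger.warning(f"Error while trying to convert data {data_name} from {csv_path} into a dataframe: {error}")
        return pd.DataFrame()
```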
@@ -53,15 +53,16 @@ def convert_from_dataset_data(self, data_name: str, data_value: Any, data_types_

converted_data = ""
try:
-            if data_type in ['string', 'int', 'float', 'bool', 'list', 'dict']:
-                converted_data = data_value
-            elif data_type == 'dataframe':
-                converted_data = self._deserialize_dataframe(data_value, data_name)
-            elif data_type == 'array':
-                converted_data = self._deserialize_array(data_value)
-            else:
-                converted_data = data_value
-                self.__logger.warning(f"Data type {data_type} for data {data_name} not found in default type list 'string', 'int', 'float', 'bool', 'list', 'dict', 'dataframe', 'array'.")
+            if data_type in self.SOSTRADES_TYPES:
+                if data_value is None:
+                    converted_data = data_value
+                elif data_type == 'dataframe':
+                    converted_data = self._deserialize_dataframe(data_value, data_name)
+                elif data_type == 'array':
+                    converted_data = self._deserialize_array(data_value)
+                else:
+                    converted_data = data_value
+            else:
+                converted_data = data_value
+                self.__logger.warning(f"Data type {data_type} for data {data_name} not found in default type list 'string', 'int', 'float', 'bool', 'list', 'dict', 'dataframe', 'array'.")
except Exception as error:
converted_data = data_value
self.__logger.warning(f"Error while trying to convert data {data_name} with value {data_value} into the type {data_type}: {error}")
@@ -86,16 +87,17 @@ def convert_to_dataset_data(self, data_name: str, data_value: Any, data_types_di

converted_data = ""
try:
-            if data_type in ['string', 'int', 'float', 'bool', 'list', 'dict']:
-                converted_data = self._serialize_jsonifiable(data_value, data_name)
-            elif data_type == 'dataframe':
-                # convert dataframe into dict with orient='list' to have {column:values}
-                converted_data = self._serialize_dataframe(data_value, data_name)
-            elif data_type == 'array':
-                converted_data = self._serialize_array(data_value, data_name)
-            else:
-                converted_data = data_value
-                self.__logger.warning(f"Data type {data_type} for data {data_name} not found in default type list 'string', 'int', 'float', 'bool', 'list', 'dict', 'dataframe', 'array'.")
+            if data_type in self.SOSTRADES_TYPES:
+                if data_value is None:
+                    converted_data = data_value
+                elif data_type == 'dataframe':
+                    # convert dataframe into dict with orient='list' to have {column:values}
+                    converted_data = self._serialize_dataframe(data_value, data_name)
+                elif data_type == 'array':
+                    converted_data = self._serialize_array(data_value, data_name)
+                else:
+                    converted_data = self._serialize_jsonifiable(data_value, data_name)
+            else:
+                converted_data = data_value
+                self.__logger.warning(f"Data type {data_type} for data {data_name} not found in default type list 'string', 'int', 'float', 'bool', 'list', 'dict', 'dataframe', 'array'.")
except Exception as error:
converted_data = data_value
self.__logger.warning(f"Error while trying to convert data {data_name} with value {data_value} into the type {data_type}: {error}")
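Both conversion methods now share one observable change: a `None` value passes through untouched for every supported type instead of reaching the dataframe/array (de)serializers. A standalone sketch of the new dispatch order (hypothetical names):

```python
def convert_value(value, data_type, known_types, deserializers):
    """Dispatch order from the commit: type gate, then None passthrough, then per-type handling."""
    if data_type in known_types:
        if value is None:
            return None  # a None never reaches a deserializer any more
        handler = deserializers.get(data_type)
        return handler(value) if handler else value
    return value  # unknown type: returned unchanged (and logged in the real code)

deserializers = {'dataframe': lambda v: f"df({v})", 'array': lambda v: f"arr({v})"}
known = {'string', 'int', 'float', 'bool', 'list', 'dict', 'dataframe', 'array'}
assert convert_value(None, 'dataframe', known, deserializers) is None
assert convert_value("x.csv", 'dataframe', known, deserializers) == "df(x.csv)"
assert convert_value(3, 'int', known, deserializers) == 3
```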
1 change: 1 addition & 0 deletions sostrades_core/execution_engine/data_manager.py
@@ -525,6 +525,7 @@ def apply_parameter_change(self,
date=datetime.now(),
dataset_data_path=dataset_data_path,
variable_key=variable_key))

dm_data[VALUE] = new_value

def export_data_in_datasets(self, datasets_mapping: DatasetsMapping) -> None:
Expand Up @@ -150,6 +150,21 @@ Where:
- $r$ [^16]
- $\Delta T$ is the world

### 3. Text in formula
Therefore the variables $\text{My new variable}_{t}$ and $\text{My other variable}_{t}$ are updated for all years $t \geq t$.

$\text{function fn (called f)} = \text{big F function }\text{\%} \times \text{function G}$

$\text{Delta X}_{t} = \text{result}_{t} - \text{y}_{t} + a^{\text{b}}_{t} + a^{\text{variable c}}_{t}$

$$\text{F1 function (X,\$)} = \frac{\text{ab, M\$}}{\text{number of data}} \times 10^6$$
$$\text{F2 function (Y, \%)} = \frac{\text{cd}}{\text{number}} \times 100$$
$$\text{F3 function for another F1 (\%)} = \frac{\text{function G}}{\text{temp test}} \times 100$$
$$\text{function F4 (Z, \%)} = \frac{\text{yz}}{\text{reset x}} \times 100$$
$$\text{Total of all functions, M\$} = \text{result of sum}$$

$$\text{Mass Loss (}\text{\%}\text{)} = \max\left(0, 0.1 \times (P - 9.81) \times 100\right)$$

END.

## Sources
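The formulas added to this documentation file all hinge on one LaTeX rule: literal `%` and `$` must be escaped inside `\text{...}`. A minimal illustration:

```latex
% inside math mode, literal percent and dollar signs must be escaped,
% including within \text{...}:
$$\text{Share (\%)} = \frac{\text{part, M\$}}{\text{total, M\$}} \times 100$$
```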
29 changes: 29 additions & 0 deletions sostrades_core/tests/data/test_92_export_mapping_disc1_disc2.json
@@ -0,0 +1,29 @@
{
"process_module_path": "sostrades_core.sos_processes.test.test_disc1_all_types",
"namespace_datasets_mapping": {
"v0|<study_ph>|x": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|x"
],
"v0|<study_ph>.Disc1|a": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|a"
],
"v0|<study_ph>.Disc1|b": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|b"
],
"v0|<study_ph>.Disc1|indicator": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|indicator"
],
"v0|<study_ph>|y": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|y"
],
"v0|<study_ph>.Disc2|constant": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|constant"
],
"v0|<study_ph>.Disc2|power": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|power"
],
"v0|<study_ph>|z": [
"MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|z"
]
}
}
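Each entry in this mapping pairs a `version|namespace|parameter` key with a list of `connector|dataset|parameter` targets, with `<study_ph>` substituted by the study name at load time. A sketch of parsing one entry under that three-field convention (hypothetical helper):

```python
def parse_mapping_entry(key: str, targets: list, study_name: str):
    """Split 'v0|<study_ph>.Disc1|a' and its 'connector|dataset|parameter' targets."""
    version, namespace, parameter = key.split("|")
    namespace = namespace.replace("<study_ph>", study_name)
    parsed_targets = [tuple(target.split("|")) for target in targets]
    return version, namespace, parameter, parsed_targets

entry = parse_mapping_entry(
    "v0|<study_ph>.Disc1|a",
    ["MVP0_local_datasets_connector_export_test|test_dataset_disc1_disc2|a"],
    "usecase_coupling_2_disc_test",
)
assert entry[1] == "usecase_coupling_2_disc_test.Disc1"
```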
35 changes: 35 additions & 0 deletions sostrades_core/tests/l0_test_20_charts.py
@@ -1,6 +1,7 @@
'''
Copyright 2022 Airbus SAS
Modifications on 02/01/2024-2024/06/28 Copyright 2024 Capgemini
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -638,3 +639,37 @@ def test_20_create_plotly_native_chart(self):

# plotly_native_chart.to_plotly().show()
plotly_native_chart.to_plotly()

def test_21_create_indicator_chart(self):

import plotly.graph_objects as go

from sostrades_core.tools.post_processing.indicator_charts.instanciated_indicator_gauge_chart import (
InstantiatedIndicatorChart,
)

fig = go.Figure()
fig.add_trace(go.Indicator())
value = 50.85
indicator_chart = InstantiatedIndicatorChart(
value=value,
mode="gauge+number",
title={'text': ' Plotly Indicator chart'},
gauge={
'axis': {'range': [0, 100]},
'steps': [
{'range': [0, 33], 'color': "red"},
{'range': [33, 66], 'color': "orange"},
{'range': [66, 100], 'color': "green"},

],
'threshold': {
'line': {'color': 'black', 'width': 4},
'thickness': 0.8,
'value': value,
},
"bar": {"color": "black"}
}
)
# indicator_chart.to_plotly().show()
indicator_chart.to_plotly()
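`InstantiatedIndicatorChart` here wraps a Plotly `go.Indicator` trace; for reference, the same gauge can be drawn with plain Plotly along these lines (a sketch reusing the test's parameters):

```python
import plotly.graph_objects as go

value = 50.85
fig = go.Figure(go.Indicator(
    mode="gauge+number",
    value=value,
    title={'text': 'Plotly Indicator chart'},
    gauge={
        'axis': {'range': [0, 100]},
        'bar': {'color': 'black'},
        'steps': [
            {'range': [0, 33], 'color': 'red'},
            {'range': [33, 66], 'color': 'orange'},
            {'range': [66, 100], 'color': 'green'},
        ],
        'threshold': {'line': {'color': 'black', 'width': 4},
                      'thickness': 0.8,
                      'value': value},
    },
))
# fig.show()
```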
71 changes: 70 additions & 1 deletion sostrades_core/tests/l0_test_92_datasets.py
@@ -37,6 +37,9 @@
from sostrades_core.datasets.datasets_connectors.datasets_connector_factory import DatasetConnectorType
from sostrades_core.datasets.datasets_connectors.datasets_connector_manager import DatasetsConnectorManager
from sostrades_core.sos_processes.test.test_disc1_all_types.usecase_dataset import Study
from sostrades_core.sos_processes.test.test_disc1_disc2_coupling.usecase_coupling_2_disc_test import (
Study as StudyDisc1Disc2,
)
from sostrades_core.study_manager.study_manager import StudyManager


@@ -276,6 +279,72 @@ def test_07_datasets_local_connector_with_all_non_nested_types(self):
self.assertEqual(dm.get_value("usecase_dataset.Disc1.b_bool"), False)
self.assertTrue((dm.get_value("usecase_dataset.Disc1.d") == pd.DataFrame({"years": [2023, 2024], "x": [1.0, 10.0]})).all().all())

def test_07b_datasets_local_connector_with_several_namespace(self):
"""
Check correctness of loaded values after loading a handcrafted local directories' dataset, testing usage of
LocalDatasetsConnector and FileSystemDatasetsSerializer. and more than one namespace
"""
usecase_file_path = sostrades_core.sos_processes.test.test_disc1_disc2_coupling.usecase_coupling_2_disc_test.__file__
process_path = os.path.dirname(usecase_file_path)
study = StudyDisc1Disc2()
study.load_data()
study.run()
dm = study.execution_engine.dm

        data_types_dict = {'a': 'float',
                           'x': 'float',
                           'b': 'float',
                           'y': 'float',
                           'z': 'float',
                           'constant': 'float',
                           'power': 'int',
                           'indicator': 'float'}

# export study in another folder
# create connector test for export
connector_args = {
"root_directory_path": "./sostrades_core/tests/data/local_datasets_db_export_test/",
"create_if_not_exists": True
}

export_connector = DatasetsConnectorManager.register_connector(connector_identifier="MVP0_local_datasets_connector_export_test",
connector_type=DatasetConnectorType.get_enum_value("Local"),
**connector_args)
test_data_folder = os.path.join(os.path.dirname(__file__), "data")
export_mapping_repo_file_path = os.path.join(test_data_folder, "test_92_export_mapping_disc1_disc2.json")

# test export
mapping = DatasetsMapping.from_json_file(export_mapping_repo_file_path)
study.export_data_from_dataset_mapping(mapping)
        exported_data = export_connector.get_values_all(DatasetInfoV0("MVP0_local_datasets_connector_export_test",
                                                                      "test_dataset_disc1_disc2"), data_types_dict)
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.Disc1.a"), exported_data.get("a"))
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.x"), exported_data.get("x"))
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.Disc1.b"), exported_data.get("b"))
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.Disc1.indicator"), exported_data.get("indicator"))
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.y"), exported_data.get("y"))
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.Disc2.constant"), exported_data.get("constant"))
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.Disc2.power"), exported_data.get("power"))
self.assertEqual(dm.get_value("usecase_coupling_2_disc_test.z"), exported_data.get("z"))

# test import
study2 = StudyManager(file_path=usecase_file_path)
study2.update_data_from_dataset_mapping(mapping)
dm2 = study2.execution_engine.dm
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.Disc1.a"), exported_data.get("a"))
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.x"), exported_data.get("x"))
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.Disc1.b"), exported_data.get("b"))
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.Disc1.indicator"), exported_data.get("indicator"))
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.y"), exported_data.get("y"))
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.Disc2.constant"), exported_data.get("constant"))
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.Disc2.power"), exported_data.get("power"))
self.assertEqual(dm2.get_value("usecase_coupling_2_disc_test.z"), exported_data.get("z"))


export_connector.clear(remove_root_directory=True)


def test_08_json_to_local_connector_conversion_and_loading(self):
"""
Use a local connector to copy values from a JSON connector then load them in the study and check correctness,
@@ -968,4 +1037,4 @@ def test_22_compatibility_V0_V1(self):
if __name__ == "__main__":
cls = TestDatasets()
cls.setUp()
-    cls.test_22_compatibility_V0_V1()
+    cls.test_07b_datasets_local_connector_with_several_namespace()
