diff --git a/.circleci/config.yml b/.circleci/config.yml index d5ebb5d5..01e9dab4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -163,7 +163,7 @@ jobs: cd /tmp grid2op.testinstall - legacy_lightsim: + legacy_lightsim_old_pp: executor: python38 # needs to be 38: whl of lightsim were not released for 3.10 at the time resource_class: small steps: @@ -190,6 +190,33 @@ jobs: export _GRID2OP_FORCE_TEST=1 python -m unittest grid2op/tests/test_basic_env_ls.py + legacy_lightsim: + executor: python38 # needs to be 38: whl of lightsim were not released for 3.10 at the time + resource_class: small + steps: + - checkout + - run: + command: | + apt-get update + apt-get install -y coinor-cbc + - run: python -m pip install virtualenv + - run: python -m virtualenv venv_test + - run: + command: | + source venv_test/bin/activate + python -m pip install -U pip setuptools wheel + python -m pip install -U lightsim2grid==0.6.0 gymnasium "numpy<1.22" + - run: + command: | + source venv_test/bin/activate + python -m pip install -e . 
+ pip freeze + - run: + command: | + source venv_test/bin/activate + export _GRID2OP_FORCE_TEST=1 + python -m unittest grid2op/tests/test_basic_env_ls.py + install39: executor: python39 resource_class: small @@ -340,6 +367,7 @@ workflows: test: jobs: - test + - legacy_lightsim_old_pp - legacy_lightsim install: jobs: diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3d495038..3f3225e6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -73,6 +73,14 @@ Next release - [FIXED] another issue with the seeding of `MultifolderWithCache`: the seed was not used correctly on the cache data when calling `chronics_handler.reset` multiple times without any changes +- [FIXED] `Backend` now properly raise EnvError (grid2op exception) instead of previously + `EnvironmentError` (python default exception) +- [FIXED] a bug in `PandaPowerBackend` (missing attribute) causing directly + https://github.com/rte-france/Grid2Op/issues/617 +- [FIXED] a bug in `Environment`: the thermal limit were used when loading the environment + even before the "time series" are applied (and before the user defined thermal limits were set) + which could lead to disconnected powerlines even before the initial step (t=0, when time + series are loaded) - [ADDED] possibility to skip some step when calling `env.reset(..., options={"init ts": ...})` - [ADDED] possibility to limit the duration of an episode with `env.reset(..., options={"max step": ...})` - [ADDED] possibility to specify the "reset_options" used in `env.reset` when diff --git a/grid2op/Backend/backend.py b/grid2op/Backend/backend.py index 3e2b96d2..9bcaa161 100644 --- a/grid2op/Backend/backend.py +++ b/grid2op/Backend/backend.py @@ -1019,12 +1019,7 @@ def _runpf_with_diverging_exception(self, is_dc : bool) -> Optional[Exception]: conv, exc_me = self.runpf(is_dc=is_dc) # run powerflow except Grid2OpException as exc_: exc_me = exc_ - # except Exception as exc_: - # exc_me = DivergingPowerflow( - # f" An unexpected error occurred during the computation 
of the powerflow." - # f"The error is: \n {exc_} \n. This is game over" - # ) - + if not conv and exc_me is None: exc_me = DivergingPowerflow( "GAME OVER: Powerflow has diverged during computation " @@ -2160,22 +2155,22 @@ def assert_grid_correct_after_powerflow(self) -> None: if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_status()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_line_flow() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_flow()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_thermal_limit() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_thermal_limit()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_line_overflow() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_overflow()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.generators_info() if len(tmp) != 3: diff --git a/grid2op/Backend/pandaPowerBackend.py b/grid2op/Backend/pandaPowerBackend.py index 95876334..299043b6 100644 --- a/grid2op/Backend/pandaPowerBackend.py +++ b/grid2op/Backend/pandaPowerBackend.py @@ -17,6 +17,8 @@ import pandapower as pp import scipy +# check that pandapower does not introduce some +from packaging import version import grid2op from grid2op.dtypes import dt_int, dt_float, dt_bool @@ -24,6 +26,8 @@ from grid2op.Exceptions import BackendError from grid2op.Backend.backend import Backend +MIN_LS_VERSION_VM_PU = version.parse("0.6.0") + try: import numba NUMBA_ = True @@ -223,6 +227,7 @@ def __init__( 
self._in_service_line_col_id = None self._in_service_trafo_col_id = None self._in_service_storage_cold_id = None + self.div_exception = None def _check_for_non_modeled_elements(self): """This function check for elements in the pandapower grid that will have no impact on grid2op. @@ -353,30 +358,15 @@ def load_grid(self, i_ref = None self._iref_slack = None self._id_bus_added = None - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - try: - pp.runpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) - except pp.powerflow.LoadflowNotConverged: - pp.rundcpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) + + self._aux_run_pf_init() # run an initial powerflow, just in case + new_pp_version = False if not "slack_weight" in self._grid.gen: self._grid.gen["slack_weight"] = 1.0 else: new_pp_version = True - + if np.all(~self._grid.gen["slack"]): # there are not defined slack bus on the data, i need to hack it up a little bit pd2ppc = self._grid._pd2ppc_lookups["bus"] # pd2ppc[pd_id] = ppc_id @@ -438,24 +428,7 @@ def load_grid(self, else: self.slack_id = (self._grid.gen["slack"].values).nonzero()[0] - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - try: - pp.runpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) - except pp.powerflow.LoadflowNotConverged: - pp.rundcpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) + self._aux_run_pf_init() # run another powerflow with the added generator self.__nb_bus_before = self._grid.bus.shape[0] self.__nb_powerline = self._grid.line.shape[0] @@ -567,12 +540,42 @@ def load_grid(self, for ind,
el in add_topo.iterrows(): pp.create_bus(self._grid, index=ind, **el) self._init_private_attrs() + self._aux_run_pf_init() # run yet another powerflow with the added buses # do this at the end self._in_service_line_col_id = int((self._grid.line.columns == "in_service").nonzero()[0][0]) self._in_service_trafo_col_id = int((self._grid.trafo.columns == "in_service").nonzero()[0][0]) self._in_service_storage_cold_id = int((self._grid.storage.columns == "in_service").nonzero()[0][0]) - + self.comp_time = 0. + + # hack for backward compat with oldest lightsim2grid version + try: + import lightsim2grid + if version.parse(lightsim2grid.__version__) < MIN_LS_VERSION_VM_PU: + warnings.warn("You are using a really old version of lightsim2grid. Consider upgrading.") + if "_options" in self._grid and "init_vm_pu" in self._grid["_options"]: + try: + float(self._grid["_options"]["init_vm_pu"]) + except ValueError as exc_: + # we delete it because lightsim2grid uses it + # to init its internal "GridModel" and did not check that + # this is a float until MIN_LS_VERSION_VM_PU + del self._grid["_options"]["init_vm_pu"] + except ImportError: + # lightsim2grid is not installed, so no risk to contaminate it + pass + + def _aux_run_pf_init(self): + """run a powerflow when the file is being loaded. This is called three times for each call to "load_grid" """ + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + self._aux_runpf_pp(False) + if not self._grid.converged: + raise pp.powerflow.LoadflowNotConverged + except pp.powerflow.LoadflowNotConverged: + self._aux_runpf_pp(True) + def _init_private_attrs(self) -> None: # number of elements per substation self.sub_info = np.zeros(self.n_sub, dtype=dt_int) @@ -691,23 +694,23 @@ def _init_private_attrs(self) -> None: "prod_v" ] = self._load_grid_gen_vm_pu # lambda grid: grid.gen["vm_pu"] - self.load_pu_to_kv = self._grid.bus["vn_kv"][self.load_to_subid].values.astype( + self.load_pu_to_kv = 1. 
* self._grid.bus["vn_kv"][self.load_to_subid].values.astype( dt_float ) - self.prod_pu_to_kv = self._grid.bus["vn_kv"][self.gen_to_subid].values.astype( + self.prod_pu_to_kv = 1. * self._grid.bus["vn_kv"][self.gen_to_subid].values.astype( dt_float ) - self.lines_or_pu_to_kv = self._grid.bus["vn_kv"][ + self.lines_or_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.line_or_to_subid ].values.astype(dt_float) - self.lines_ex_pu_to_kv = self._grid.bus["vn_kv"][ + self.lines_ex_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.line_ex_to_subid ].values.astype(dt_float) - self.storage_pu_to_kv = self._grid.bus["vn_kv"][ + self.storage_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.storage_to_subid ].values.astype(dt_float) - self.thermal_limit_a = 1000 * np.concatenate( + self.thermal_limit_a = 1000. * np.concatenate( ( self._grid.line["max_i_ka"].values, self._grid.trafo["sn_mva"].values @@ -827,7 +830,7 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back """ if backendAction is None: return - + cls = type(self) ( @@ -1012,13 +1015,14 @@ def _aux_runpf_pp(self, is_dc: bool): ) warnings.filterwarnings("ignore", category=RuntimeWarning) warnings.filterwarnings("ignore", category=DeprecationWarning) - nb_bus = self.get_nb_active_bus() - if self._nb_bus_before is None: - self._pf_init = "dc" - elif nb_bus == self._nb_bus_before: - self._pf_init = "results" - else: - self._pf_init = "auto" + self._pf_init = "dc" + # nb_bus = self.get_nb_active_bus() + # if self._nb_bus_before is None: + # self._pf_init = "dc" + # elif nb_bus == self._nb_bus_before: + # self._pf_init = "results" + # else: + # self._pf_init = "auto" if (~self._grid.load["in_service"]).any(): # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state @@ -1081,12 +1085,13 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: """ try: self._aux_runpf_pp(is_dc) - - cls = type(self) + cls = type(self) # if a connected 
bus has a no voltage, it's a divergence (grid was not connected) if self._grid.res_bus.loc[self._grid.bus["in_service"]]["va_degree"].isnull().any(): - raise pp.powerflow.LoadflowNotConverged("Isolated bus") - + buses_ko = self._grid.res_bus.loc[self._grid.bus["in_service"]]["va_degree"].isnull() + buses_ko = buses_ko.values.nonzero()[0] + raise pp.powerflow.LoadflowNotConverged(f"Isolated bus, check buses {buses_ko} with `env.backend._grid.res_bus.iloc[{buses_ko}, :]`") + ( self.prod_p[:], self.prod_q[:], @@ -1104,7 +1109,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: if not np.isfinite(self.load_v).all(): # TODO see if there is a better way here # some loads are disconnected: it's a game over case! - raise pp.powerflow.LoadflowNotConverged("Isolated load") + raise pp.powerflow.LoadflowNotConverged(f"Isolated load: check loads {np.isfinite(self.load_v).nonzero()[0]}") else: # fix voltages magnitude that are always "nan" for dc case # self._grid.res_bus["vm_pu"] is always nan when computed in DC @@ -1130,7 +1135,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.p_or[:] = self._aux_get_line_info("p_from_mw", "p_hv_mw") self.q_or[:] = self._aux_get_line_info("q_from_mvar", "q_hv_mvar") self.v_or[:] = self._aux_get_line_info("vm_from_pu", "vm_hv_pu") - self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000 + self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000. self.theta_or[:] = self._aux_get_line_info( "va_from_degree", "va_hv_degree" ) @@ -1140,7 +1145,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.p_ex[:] = self._aux_get_line_info("p_to_mw", "p_lv_mw") self.q_ex[:] = self._aux_get_line_info("q_to_mvar", "q_lv_mvar") self.v_ex[:] = self._aux_get_line_info("vm_to_pu", "vm_lv_pu") - self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000 + self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000. 
self.theta_ex[:] = self._aux_get_line_info( "va_to_degree", "va_lv_degree" ) @@ -1158,7 +1163,9 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.theta_ex[~np.isfinite(self.theta_ex)] = 0.0 self._nb_bus_before = None - self._grid._ppc["gen"][self._iref_slack, 1] = 0.0 + if self._iref_slack is not None: + # a gen has been added to represent the slack, modeled as an "ext_grid" + self._grid._ppc["gen"][self._iref_slack, 1] = 0.0 # handle storage units # note that we have to look ourselves for disconnected storage @@ -1179,13 +1186,17 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self._grid.storage["in_service"].values[deact_storage] = False self._topo_vect[:] = self._get_topo_vect() - return self._grid.converged, None + if not self._grid.converged: + raise pp.powerflow.LoadflowNotConverged("Divergence without specific reason (self._grid.converged is False)") + self.div_exception = None + return True, None except pp.powerflow.LoadflowNotConverged as exc_: # of the powerflow has not converged, results are Nan + self.div_exception = exc_ self._reset_all_nan() msg = exc_.__str__() - return False, BackendError(f'powerflow diverged with error :"{msg}"') + return False, BackendError(f'powerflow diverged with error :"{msg}", you can check `env.backend.div_exception` for more information') def _reset_all_nan(self) -> None: self.p_or[:] = np.NaN @@ -1221,7 +1232,6 @@ def copy(self) -> "PandaPowerBackend": This should return a deep copy of the Backend itself and not just the `self._grid` """ - # res = copy.deepcopy(self) # this was really slow... res = type(self)(**self._my_kwargs) # copy from base class (backend) @@ -1298,11 +1308,10 @@ def copy(self) -> "PandaPowerBackend": with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) res.__pp_backend_initial_grid = copy.deepcopy(self.__pp_backend_initial_grid) - - res.tol = ( - self.tol - ) # this is NOT the pandapower tolerance !!!! 
this is used to check if a storage unit + + # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit # produce / absorbs anything + res.tol = self.tol # TODO storage doc (in grid2op rst) of the backend res.can_output_theta = self.can_output_theta # I support the voltage angle @@ -1316,6 +1325,7 @@ def copy(self) -> "PandaPowerBackend": res._in_service_trafo_col_id = self._in_service_trafo_col_id res._missing_two_busbars_support_info = self._missing_two_busbars_support_info + res.div_exception = self.div_exception return res def close(self) -> None: diff --git a/grid2op/Environment/environment.py b/grid2op/Environment/environment.py index 51cdcc68..8e7214d0 100644 --- a/grid2op/Environment/environment.py +++ b/grid2op/Environment/environment.py @@ -444,7 +444,14 @@ def _init_backend( self._reset_redispatching() self._reward_to_obs = {} do_nothing = self._helper_action_env({}) + + # see issue https://github.com/rte-france/Grid2Op/issues/617 + # thermal limits are set AFTER this initial step + _no_overflow_disconnection = self._no_overflow_disconnection + self._no_overflow_disconnection = True *_, fail_to_start, info = self.step(do_nothing) + self._no_overflow_disconnection = _no_overflow_disconnection + if fail_to_start: raise Grid2OpException( "Impossible to initialize the powergrid, the powerflow diverge at iteration 0. " diff --git a/grid2op/tests/test_issue_616.py b/grid2op/tests/test_issue_616.py new file mode 100644 index 00000000..6a779da3 --- /dev/null +++ b/grid2op/tests/test_issue_616.py @@ -0,0 +1,320 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319 +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. 
+# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import unittest +import grid2op +import tempfile +import numpy as np +import re +import os +import json +import warnings + +from grid2op.Chronics import (MultifolderWithCache, + GridStateFromFileWithForecastsWithMaintenance, + FromHandlers) +from grid2op.Chronics.handlers import (CSVHandler, + NoisyForecastHandler, + LoadQFromPHandler, + JSONMaintenanceHandler) + +from grid2op.Runner import Runner + + +class Issue616Tester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_name, + test=True) + + # hack for adding maintenance + dict_maint = { + "maintenance_starting_hour": 1, + "maintenance_ending_hour": 2, + "line_to_maintenance": ["1_2_2", "1_4_4", "9_10_12", "12_13_14"], + "daily_proba_per_month_maintenance": [0.7 for _ in range(12)], + "max_daily_number_per_month_maintenance": [1 for _ in range(12)], + "maintenance_day_of_week": list(range(7)) + } + self.tmp_files = [os.path.join(env.get_path_env(), + "chronics", "0000", "maintenance_meta.json"), + os.path.join(env.get_path_env(), + "chronics", "0001", "maintenance_meta.json"), + os.path.join(env.get_path_env(), + "chronics", "0000", "maintenance_meta.json"), + ] + for path in self.tmp_files: + with open(path, "w", encoding="utf-8") as f: + json.dump(fp=f, obj=dict_maint) + env.close() + # create the env with the maintenance + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_bug = grid2op.make(self.env_name, + chronics_class=MultifolderWithCache, + data_feeding_kwargs={"gridvalueClass": GridStateFromFileWithForecastsWithMaintenance}, + test=True + ) + self.env_bug.chronics_handler.reset() + + # store the normal maintenance schedule: + self.maint_ref = (np.array([ 0, 1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, + 11, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299]) + 12, + np.array([4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2])) + + def tearDown(self) -> None: + self.env_bug.close() + for el in self.tmp_files: + if os.path.exists(el): + os.remove(el) + return super().tearDown() + + def test_reset(self): + """test that the seed is used correctly in env.reset""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + obs = self.env_bug.reset(seed=1, options={"time serie id": 0}) + maint_1 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_0 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + def test_runner(self): + """test the runner behaves correctly""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + runner = Runner(**self.env_bug.get_params_for_runner()) + res = runner.run(nb_episode=3, + env_seeds=[0, 1, 0], + max_iter=5, + add_detailed_output=True) + + maint_ref = np.array([ -1, -1, 300, -1, 12, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + dtype=np.int32) + assert (res[0][-1].observations[0].time_next_maintenance == maint_ref).all() + assert (res[0][-1].observations[0].time_next_maintenance != res[1][-1].observations[0].time_next_maintenance).any() + assert (res[0][-1].observations[0].time_next_maintenance == res[2][-1].observations[0].time_next_maintenance).all() + + def test_chronics_handler_twice_reset(self): + """test the same results is obtained if the chronics handler is reset twice""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. 
* self.env_bug.chronics_handler.real_data.data.maintenance + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + self.env_bug.chronics_handler.reset() + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + +class Issue616WithHandlerTester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + hs_ = [5*(i+1) for i in range(12)] + + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_name, + test=True) + + # hack for adding maintenance + dict_maint = { + "maintenance_starting_hour": 1, + "maintenance_ending_hour": 2, + "line_to_maintenance": ["1_2_2", "1_4_4", "9_10_12", "12_13_14"], + "daily_proba_per_month_maintenance": [0.7 for _ in range(12)], + "max_daily_number_per_month_maintenance": [1 for _ in range(12)], + "maintenance_day_of_week": list(range(7)) + } + self.tmp_json = tempfile.NamedTemporaryFile(dir=os.path.join(env.get_path_env(), "chronics", "0000"), + prefix="maintenance_meta", + suffix=".json") + with open(self.tmp_json.name, "w", encoding="utf-8") as f: + json.dump(fp=f, obj=dict_maint) + + # uses the default noise: sqrt(horizon) * 0.01 : error of 8% 1h ahead + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_bug = grid2op.make(self.env_name, + chronics_class=MultifolderWithCache, + data_feeding_kwargs={"gridvalueClass": FromHandlers, + "gen_p_handler": CSVHandler("prod_p"), + "load_p_handler": CSVHandler("load_p"), + "gen_v_handler": CSVHandler("prod_v"), + "load_q_handler": LoadQFromPHandler("load_q"), + "h_forecast": hs_, + "maintenance_handler": JSONMaintenanceHandler(json_file_name=self.tmp_json.name), + "gen_p_for_handler": NoisyForecastHandler("prod_p_forecasted"), + "load_p_for_handler": 
NoisyForecastHandler("load_p_forecasted"), + "load_q_for_handler": NoisyForecastHandler("load_q_forecasted"), + }, + test=True + ) + self.env_bug.chronics_handler.reset() + + # store the normal maintenance schedule: + self.maint_ref = (np.array([ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311]), + np.array([12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14])) + + self.load_p_ref = np.array([[22. , 87. , 45.79999924, 7. , 12. , + 28.20000076, 8.69999981, 3.5 , 5.5 , 12.69999981, + 14.80000019], + [22.44357109, 90.38361359, 46.61357117, 7.00726891, 12.49121857, + 28.84151268, 8.93680668, 3.45285726, 5.58550406, 13.10054588, + 15.43630219], + [22.48419762, 89.22782135, 45.57607269, 6.98833132, 12.35618019, + 28.45972633, 9.01393414, 3.44352579, 5.57040882, 12.96386147, + 15.2933054 ], + [21.85004234, 86.51035309, 44.29330063, 6.82195902, 11.86427689, + 28.2765255 , 8.79933834, 3.36154509, 5.33892441, 12.65522861, + 14.92921543], + [21.61282349, 86.64777374, 44.50276947, 6.68032742, 11.88705349, + 27.90019035, 8.84160995, 3.34016371, 5.30496597, 12.57473373, + 14.63777542], + [23.22621727, 92.27429962, 47.29320145, 7.25162458, 12.71661758, + 30.16255379, 9.24844837, 3.57326436, 5.57008839, 13.34719276, + 15.97459316], + [20.23793983, 81.04374695, 42.03972244, 6.25536346, 10.85489559, + 26.03334999, 8.0951767 , 3.12768173, 5.05948496, 11.49882984, + 13.89058685], + [19.92967606, 81.96430206, 41.73068237, 6.54965878, 11.13441944, + 26.10506821, 8.04672432, 3.08769631, 4.95902777, 11.50868607, + 13.94141674], + [20.64870644, 83.94567871, 42.16581726, 6.56127167, 11.38573551, + 27.0170002 , 8.39456749, 3.1841464 , 5.21042156, 11.96467113, + 14.37690353], + [19.72007751, 79.25064087, 40.82889175, 6.11044645, 10.83215523, + 25.83052444, 7.77693176, 3.05522323, 4.814291 , 11.5728159 , + 13.9799614 ], + [21.79347801, 87.17391205, 42.77978897, 6.76001358, 11.70390511, 
+ 28.14990807, 8.67703247, 3.32955885, 5.24657774, 12.30927849, + 14.83167171], + [19.81615639, 78.61643982, 40.09531021, 6.11152506, 10.64886951, + 25.27948952, 7.87090397, 2.96316385, 4.72254229, 11.20446301, + 13.88982964], + [19.3391819 , 77.26506805, 39.22829056, 6.04922247, 10.44865608, + 24.83847427, 7.8823204 , 2.93295646, 4.76605368, 11.18189621, + 13.19830322]]) + + self.load_q_ref = np.array([15.4 , 60.899998 , 32.059998 , 4.9 , 8.4 , + 19.74 , 6.0899997, 2.45 , 3.85 , 8.889999 , + 10.36 ], dtype=np.float32) + + def tearDown(self) -> None: + self.env_bug.close() + self.tmp_json.close() + return super().tearDown() + + def test_reset(self): + """test that the seed is used correctly in env.reset""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_ref = 1. * obs.load_q + load_p_ref = 1. * obs.get_forecast_arrays()[0] + + obs = self.env_bug.reset(seed=1, options={"time serie id": 0}) + maint_1 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_1 = 1. * obs.load_q + load_p_1= 1. * obs.get_forecast_arrays()[0] + + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_0 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_0 = 1. * obs.load_q + load_p_0 = 1. * obs.get_forecast_arrays()[0] + + # maintenance, so JSONMaintenanceHandler + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + # load_q, so LoadQFromPHandler + assert (load_q_ref == load_q_0).all() + # assert (load_q_ref != load_q_1).any() # it's normal it works as this is not random ! 
+ assert (load_q_ref == self.load_q_ref).all() + + # load_p_forecasted, so NoisyForecastHandler + assert (load_p_ref == load_p_0).all() + assert (load_p_ref != load_p_1).any() + assert (np.abs(load_p_ref - self.load_p_ref) <= 1e-6).all() + + def test_runner(self): + """test the runner behaves correctly""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + runner = Runner(**self.env_bug.get_params_for_runner()) + res = runner.run(nb_episode=3, + env_seeds=[0, 1, 0], + max_iter=5, + add_detailed_output=True) + obs = res[0][-1].observations[0] + maint_ref = 1. * obs.time_next_maintenance + load_q_ref = 1. * obs.load_q + # load_p_ref = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + obs = res[1][-1].observations[0] + maint_1 = 1. * obs.time_next_maintenance + load_q_1 = 1. * obs.load_q + # load_p_1 = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + obs = res[2][-1].observations[0] + maint_0 = 1. * obs.time_next_maintenance + load_q_0 = 1. * obs.load_q + # load_p_0 = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + # maintenance, so JSONMaintenanceHandler + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + # TODO test against a reference data stored in the file + + # load_q, so LoadQFromPHandler + assert (load_q_ref == load_q_0).all() + # assert (load_q_ref != load_q_1).any() # it's normal it works as this is not random ! + assert (load_q_ref == self.load_q_ref).all() + + # load_p_forecasted, so NoisyForecastHandler + # assert (load_p_ref == load_p_0).all() + # assert (load_p_ref != load_p_1).any() + # TODO test that with an agent + + def test_chronics_handler_twice_reset(self): + """test the same results is obtained if the chronics handler is reset twice""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * obs.time_next_maintenance + load_q_ref = 1. * obs.load_q + load_p_ref = 1. 
* obs.get_forecast_arrays()[0] + + self.env_bug.chronics_handler.reset() + maint_1 = 1. * obs.time_next_maintenance + load_q_1 = 1. * obs.load_q + load_p_1 = 1. * obs.get_forecast_arrays()[0] + + assert (np.abs(maint_ref - maint_1) <= 1e-6).all() + assert (np.abs(load_q_ref - load_q_1) <= 1e-6).all() + assert (np.abs(load_p_ref - load_p_1) <= 1e-6).all() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_issue_617.py b/grid2op/tests/test_issue_617.py new file mode 100644 index 00000000..e9072a68 --- /dev/null +++ b/grid2op/tests/test_issue_617.py @@ -0,0 +1,102 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319 +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ +import unittest +import pandapower as pp +import tempfile +import os +from pathlib import Path +import warnings +import copy +import numpy as np + + +from helper_path_test import PATH_DATA_TEST +import grid2op +from grid2op.Backend.pandaPowerBackend import PandaPowerBackend +from grid2op.Action.playableAction import PlayableAction +from grid2op.Observation.completeObservation import CompleteObservation +from grid2op.Reward.flatReward import FlatReward +from grid2op.Rules.DefaultRules import DefaultRules +from grid2op.Chronics.multiFolder import Multifolder +from grid2op.Chronics.gridStateFromFileWithForecasts import GridStateFromFileWithForecasts +from grid2op.Chronics import ChangeNothing + + +class Issue617Tester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + root_path = Path(os.path.abspath(PATH_DATA_TEST)) + self.env_path = tempfile.TemporaryDirectory(dir=root_path) + self.tol = 1e-6 + + def tearDown(self) -> None: + self.env_path.cleanup() + return super().tearDown() + + def create_config(self, env_path:Path, network, **kwargs): + thermal_limits = [10_000. 
* el for el in network.line.max_i_ka] # Thermal Limit in Amps (A) + with open(Path(env_path.name) / "config.py", "w") as config: + # Import Statements + config.writelines( + [f"from {value.__module__} import {value.__name__}\n" for value in kwargs.values() if hasattr(value, "__module__")] + ) + + # Config Dictionary + config.writelines( + ["config = {\n"] + + [f"'{k}':{getattr(v,'__name__', 'None')},\n" for k,v in kwargs.items()] + + [f"'thermal_limits':{thermal_limits}\n"] + + ["}\n"] + ) + return thermal_limits + + def create_pp_net(self): + network = pp.create_empty_network() + pp.create_buses(network, nr_buses=2, vn_kv=20.0) + pp.create_gen(network, bus=0, p_mw=10.0, min_p_mw=-1e9, max_p_mw=1e9, slack=True, slack_weight=1.0) + pp.create_line(network, from_bus=0, to_bus=1, length_km=10.0, std_type="NAYY 4x50 SE") + pp.create_load(network, bus=1, p_mw=10.0, controllable=False) + pp.to_json(network, Path(self.env_path.name) / "grid.json") + return network + + def test_can_make_env(self): + network = self.create_pp_net() + thermal_limits = self.create_config(self.env_path, + network, + backend=PandaPowerBackend, + action=PlayableAction, + observation_class=CompleteObservation, + reward_class=FlatReward, + gamerules_class=DefaultRules, + chronics_class=Multifolder, + grid_value_class=GridStateFromFileWithForecasts, + voltagecontroler_class=None, + names_chronics_to_grid=None) + + pp.runpp(network, numba=True, lightsim2grid=False, max_iteration=10, distributed_slack=False, init="dc", check_connectivity=False) + assert network.converged + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_path.name, chronics_class=ChangeNothing) + assert (np.abs(env.get_thermal_limit() - thermal_limits) <= 1e-6).all() + obs = env.reset() + assert (np.abs(obs.p_or - network.res_line["p_from_mw"]) <= self.tol).all() + assert (np.abs(obs.q_or - network.res_line["q_from_mvar"]) <= self.tol).all() + assert (np.abs(obs.a_or - 1000. 
* network.res_line["i_from_ka"]) <= self.tol).all() + obs, reward, done, info = env.step(env.action_space()) + assert (np.abs(obs.p_or - network.res_line["p_from_mw"]) <= self.tol).all() + assert (np.abs(obs.q_or - network.res_line["q_from_mvar"]) <= self.tol).all() + assert (np.abs(obs.a_or - 1000. * network.res_line["i_from_ka"]) <= self.tol).all() + + +if __name__ == "__main__": + unittest.main()