Merge branch 'master' into 302-new-data-source-for-gdac-from-amazon-s3
gmaze committed Dec 12, 2024
2 parents 4186506 + b078e8e commit de5c149
Showing 18 changed files with 384 additions and 161 deletions.
16 changes: 8 additions & 8 deletions .github/workflows/pytests-upstream.yml
@@ -80,7 +80,7 @@ jobs:
steps:
- name: Energy Estimation - Initialize
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
with:
task: start-measurement
company-uuid: ${{ secrets.CARBONDB_COMPANY_UUID }}
@@ -127,7 +127,7 @@ jobs:
- name: Energy Estimation - Measure Tests Setup
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
env:
ELECTRICITY_MAPS_TOKEN: ${{ secrets.ELECTRICITY_MAPS_TOKEN }}
with:
@@ -147,7 +147,7 @@ jobs:
- name: Energy Estimation - Measure Tests Exec
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
env:
ELECTRICITY_MAPS_TOKEN: ${{ secrets.ELECTRICITY_MAPS_TOKEN }}
with:
@@ -173,7 +173,7 @@ jobs:

- name: Energy Estimation - Show Results
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
env:
ELECTRICITY_MAPS_TOKEN: ${{ secrets.ELECTRICITY_MAPS_TOKEN }}
with:
@@ -208,7 +208,7 @@ jobs:
steps:
- name: Energy Estimation - Initialize
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
with:
task: start-measurement
company-uuid: ${{ secrets.CARBONDB_COMPANY_UUID }}
@@ -255,7 +255,7 @@ jobs:
- name: Energy Estimation - Measure Tests Setup
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
env:
ELECTRICITY_MAPS_TOKEN: ${{ secrets.ELECTRICITY_MAPS_TOKEN }}
with:
@@ -275,7 +275,7 @@ jobs:
- name: Energy Estimation - Measure Tests Exec
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
env:
ELECTRICITY_MAPS_TOKEN: ${{ secrets.ELECTRICITY_MAPS_TOKEN }}
with:
@@ -292,7 +292,7 @@ jobs:

- name: Energy Estimation - Show Results
if: ${{matrix.os == 'ubuntu-latest'}}
- uses: green-coding-berlin/eco-ci-energy-estimation@v3
+ uses: green-coding-berlin/eco-ci-energy-estimation@v4
env:
ELECTRICITY_MAPS_TOKEN: ${{ secrets.ELECTRICITY_MAPS_TOKEN }}
with:
4 changes: 2 additions & 2 deletions .github/workflows/pytests.yml
@@ -132,7 +132,7 @@ jobs:
continue-on-error: true

- name: Upload coverage to Codecov
- uses: codecov/codecov-action@v4.6.0
+ uses: codecov/codecov-action@v5.1.1
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./cov.xml
@@ -261,7 +261,7 @@ jobs:
continue-on-error: true

- name: Upload coverage to Codecov
- uses: codecov/codecov-action@v4.6.0
+ uses: codecov/codecov-action@v5.1.1
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./cov.xml
3 changes: 1 addition & 2 deletions argopy/data_fetchers/erddap_data_processors.py
@@ -76,7 +76,6 @@ def pre_process(
Fetched_url = this_ds.attrs.get("Fetched_uri")
else:
Fetched_url = this_ds.attrs.get("history", "").split('\n')[-1].split(' ')[-1]
- Fetched_constraints = UriCName(Fetched_url)

# Finally overwrite erddap attributes with those from argopy:
raw_attrs = this_ds.attrs.copy()
@@ -104,7 +103,7 @@ def pre_process(
this_ds.attrs["Fetched_date"] = pd.to_datetime("now", utc=True).strftime(
"%Y/%m/%d"
)
this_ds.attrs["Fetched_constraints"] = Fetched_constraints
this_ds.attrs["Fetched_constraints"] = UriCName(Fetched_url).cname
this_ds.attrs["Fetched_uri"] = Fetched_url
this_ds = this_ds[np.sort(this_ds.data_vars)]

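This change drops the intermediate `Fetched_constraints` variable and stores the string returned by `UriCName(Fetched_url).cname` instead of the `UriCName` object itself. A minimal sketch of the pattern; the import path and URL here are assumptions for illustration, only `.cname` appears in the diff:

```python
from argopy.utils import UriCName  # import path assumed for this sketch

url = "https://erddap.ifremer.fr/erddap/tabledap/ArgoFloats.nc"

# Storing the .cname string (not the UriCName object) keeps the dataset
# attribute a plain str, e.g. serializable by ds.to_netcdf().
fetched_constraints = UriCName(url).cname
```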
4 changes: 4 additions & 0 deletions argopy/data_fetchers/gdac_data_processors.py
@@ -1,6 +1,10 @@
import numpy as np
import xarray as xr
from typing import Literal
+ import logging


+ log = logging.getLogger("argopy.gdac.data")


def pre_process_multiprof(
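With this module-level logger in place, messages from the GDAC pre-processors can be surfaced through standard stdlib configuration; a small sketch (the logger name comes straight from the diff, everything else is generic logging setup):

```python
import logging

# Show argopy GDAC data-processing messages on the console at DEBUG level.
logging.basicConfig(format="%(name)s | %(levelname)s | %(message)s")
logging.getLogger("argopy.gdac.data").setLevel(logging.DEBUG)
```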
45 changes: 36 additions & 9 deletions argopy/extensions/params_data_mode.py
@@ -6,7 +6,11 @@
import copy

from ..utils import to_list, list_core_parameters
- from ..utils.transform import split_data_mode, merge_param_with_param_adjusted, filter_param_by_data_mode
+ from ..utils.transform import (
+     split_data_mode,
+     merge_param_with_param_adjusted,
+     filter_param_by_data_mode,
+ )
from ..stores import (
indexstore_pd as ArgoIndex,
) # make sure we work with a Pandas index store
@@ -43,10 +47,12 @@ class ParamsDataMode(ArgoAccessorExtension):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

- def compute(self, indexfs: Union[None, ArgoIndex]) -> xr.Dataset:  # noqa: C901
- """Compute and add <PARAM>_DATA_MODE variables to a xarray dataset
+ def _compute_from_ArgoIndex(
+     self, indexfs: Union[None, ArgoIndex]
+ ) -> xr.Dataset:  # noqa: C901
+ """Compute <PARAM>_DATA_MODE variables from ArgoIndex
- This method consume a collection of points.
+ This method consumes a collection of points.
Parameters
----------
Expand All @@ -55,9 +61,9 @@ def compute(self, indexfs: Union[None, ArgoIndex]) -> xr.Dataset: # noqa: C901
Returns
-------
- :class:`xr.Dataset`
+ :class:`xarray.Dataset`
"""
- idx = copy.copy(indexfs) if isinstance(indexfs, ArgoIndex) else ArgoIndex()
+ idx = indexfs.copy(deep=True) if isinstance(indexfs, ArgoIndex) else ArgoIndex()

def complete_df(this_df, params):
"""Add 'wmo', 'cyc' and '<param>_data_mode' columns to this dataframe"""
@@ -103,6 +109,7 @@ def print_etime(txt, t0):

profiles = self._argo.list_WMO_CYC
idx.search_wmo(self._argo.list_WMO)

params = [
p
for p in idx.read_params()
@@ -168,10 +175,30 @@
self._obj = self._obj[np.sort(self._obj.data_vars)]
return self._obj

- def split(self):
+ def compute(self, indexfs: Union[None, ArgoIndex]) -> xr.Dataset:
+     """Compute <PARAM>_DATA_MODE variables"""
+     if "STATION_PARAMETERS" in self._obj and "PARAMETER_DATA_MODE" in self._obj:
+         return split_data_mode(self._obj)
+     else:
+         return self._compute_from_ArgoIndex(indexfs=indexfs)
+
+ def split(self) -> xr.Dataset:
"""Convert PARAMETER_DATA_MODE(N_PROF, N_PARAM) into several <PARAM>_DATA_MODE(N_PROF) variables
Using the list of *PARAM* found in ``STATION_PARAMETERS``, this method will create ``N_PARAM``
new variables in the dataset ``<PARAM>_DATA_MODE(N_PROF)``.
The variable ``PARAMETER_DATA_MODE`` is dropped from the dataset at the end of the process.
Returns
-------
:class:`xarray.Dataset`
"""
return split_data_mode(self._obj)

- def merge(self, params: Union[str, List[str]] = "all", errors: str = "raise") -> xr.Dataset:
+ def merge(
+     self, params: Union[str, List[str]] = "all", errors: str = "raise"
+ ) -> xr.Dataset:
"""Merge <PARAM> and <PARAM>_ADJUSTED variables according to DATA_MODE or <PARAM>_DATA_MODE
Merging is done as follows:
@@ -251,7 +278,7 @@ def filter(
logical: str = "and",
mask: bool = False,
errors: str = "raise",
- ):
+ ) -> xr.Dataset:
"""Filter measurements according to parameters data mode
Filter the dataset to keep points where all or some of the parameters are in any of the data modes specified.
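Taken together, `compute` now dispatches between two strategies: a cheap `split_data_mode` when the dataset already carries `STATION_PARAMETERS` and `PARAMETER_DATA_MODE`, and the renamed `_compute_from_ArgoIndex` fallback otherwise. A hedged usage sketch, assuming the extension is exposed through the `ds.argo.datamode` accessor; method names and keyword defaults are taken from the diff, while the fetcher call and float number are purely illustrative:

```python
import argopy

# Fetch a collection of points (illustrative access point and float).
ds = argopy.DataFetcher(src="gdac", ds="phy").float(6902746).to_xarray()

# Dispatch: split PARAMETER_DATA_MODE if present, else fall back to the ArgoIndex.
ds = ds.argo.datamode.compute(None)

# Merge <PARAM>/<PARAM>_ADJUSTED variables according to per-parameter data modes.
ds = ds.argo.datamode.merge(params="all", errors="raise")
```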
2 changes: 2 additions & 0 deletions argopy/fetchers.py
@@ -177,6 +177,8 @@ def __init__(self, mode: str = "", src: str = "", ds: str = "", **fetcher_kwargs
raise OptionValueError(
"The 'argovis' data source fetching is only available in 'standard' user mode"
)
+ if self._src == "gdac" and "bgc" in self._dataset_id:
+     warnings.warn("BGC data support with the 'gdac' data source is still in Work In Progress")

@property
def _icon_user_mode(self):
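The new guard warns instead of raising, so a 'gdac' + BGC fetcher can still be constructed while support matures. A small check of the behaviour added above (a sketch; it assumes a 'bgc' dataset id is accepted by the constructor):

```python
import warnings
import argopy

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    argopy.DataFetcher(src="gdac", ds="bgc")  # triggers the new WIP warning
    assert any("Work In Progress" in str(w.message) for w in caught)
```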
2 changes: 2 additions & 0 deletions argopy/stores/argo_index.py
@@ -76,6 +76,8 @@ class ArgoIndex(indexstore):
>>> idx.read_wmo
>>> idx.read_params
>>> idx.records_per_wmo
+ >>> idx.copy(deep=False)
"""

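The docstring example documents the new `copy` method that `_compute_from_ArgoIndex` relies on above (`idx = indexfs.copy(deep=True)`), so that searches no longer mutate the caller's index. A minimal sketch; `search_wmo` and the `deep` keyword are taken from the diff, the float number is illustrative:

```python
from argopy import ArgoIndex

idx = ArgoIndex()            # default index store
work = idx.copy(deep=True)   # independent copy, safe to search on
view = idx.copy(deep=False)  # shallow copy, shares underlying data with idx
work.search_wmo([6902746])   # leaves `idx` untouched
```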
[Diffs for the remaining 11 changed files were not loaded on this page.]
