From 8e79aa27ad3532569a4775228df1c7efdfaa7db5 Mon Sep 17 00:00:00 2001
From: R-Palazzo <116157184+R-Palazzo@users.noreply.github.com>
Date: Wed, 26 Jun 2024 16:24:41 +0100
Subject: [PATCH] Add support for numpy 2.0.0 (#599)

---
 pyproject.toml                                              | 6 +++---
 .../multi_table/_properties/test_column_pair_trends.py      | 5 +++--
 .../statistical/test_cardinality_statistic_similarity.py    | 3 ++-
 .../single_column/statistical/test_statistic_similarity.py  | 7 ++++---
 4 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index f0e87e4d..0e4603b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,9 +20,9 @@ license = { text = 'MIT license' }
 requires-python = ">=3.8,<3.13"
 readme = 'README.md'
 dependencies = [
-    "numpy>=1.21.0,<2.0.0;python_version<'3.10'",
-    "numpy>=1.23.3,<2.0.0;python_version>='3.10' and python_version<'3.12'",
-    "numpy>=1.26.0,<2.0.0;python_version>='3.12'",
+    "numpy>=1.21.0;python_version<'3.10'",
+    "numpy>=1.23.3;python_version>='3.10' and python_version<'3.12'",
+    "numpy>=1.26.0;python_version>='3.12'",
     "pandas>=1.4.0;python_version<'3.11'",
     "pandas>=1.5.0;python_version>='3.11' and python_version<'3.12'",
     "pandas>=2.1.1;python_version>='3.12'",
diff --git a/tests/integration/reports/multi_table/_properties/test_column_pair_trends.py b/tests/integration/reports/multi_table/_properties/test_column_pair_trends.py
index eda415e7..65438403 100644
--- a/tests/integration/reports/multi_table/_properties/test_column_pair_trends.py
+++ b/tests/integration/reports/multi_table/_properties/test_column_pair_trends.py
@@ -1,5 +1,6 @@
 from unittest.mock import Mock
 
+import numpy as np
 from tqdm import tqdm
 
 from sdmetrics.demos import load_demo
@@ -17,7 +18,7 @@ def test_end_to_end(self):
         result = column_pair_trends.get_score(real_data, synthetic_data, metadata)
 
         # Assert
-        assert result == 0.45654629583521095
+        assert np.isclose(result, 0.45654629583521095, atol=1e-8)
 
     def test_with_progress_bar(self):
         """Test that the progress bar is correctly updated."""
@@ -37,5 +38,5 @@ def test_with_progress_bar(self):
         result = column_pair_trends.get_score(real_data, synthetic_data, metadata, progress_bar)
 
         # Assert
-        assert result == 0.45654629583521095
+        assert np.isclose(result, 0.45654629583521095, atol=1e-8)
         assert mock_update.call_count == num_iter
diff --git a/tests/unit/multi_table/statistical/test_cardinality_statistic_similarity.py b/tests/unit/multi_table/statistical/test_cardinality_statistic_similarity.py
index 101e6f67..74804500 100644
--- a/tests/unit/multi_table/statistical/test_cardinality_statistic_similarity.py
+++ b/tests/unit/multi_table/statistical/test_cardinality_statistic_similarity.py
@@ -2,6 +2,7 @@
 
 import numpy as np
 import pandas as pd
+import pytest
 
 from sdmetrics.multi_table.statistical import CardinalityStatisticSimilarity
 from sdmetrics.warnings import ConstantInputWarning
@@ -57,7 +58,7 @@ def test__compute_statistic_constant_input(self):
         )
 
         # Run
-        with np.testing.assert_warns(ConstantInputWarning, match=expected_warn_msg):
+        with pytest.warns(ConstantInputWarning, match=expected_warn_msg):
             result = CardinalityStatisticSimilarity._compute_statistic(
                 real_distribution, synthetic_distribution, 'mean'
             )
diff --git a/tests/unit/single_column/statistical/test_statistic_similarity.py b/tests/unit/single_column/statistical/test_statistic_similarity.py
index b6c98f40..09650ecc 100644
--- a/tests/unit/single_column/statistical/test_statistic_similarity.py
+++ b/tests/unit/single_column/statistical/test_statistic_similarity.py
@@ -2,6 +2,7 @@
 
 import numpy as np
 import pandas as pd
+import pytest
 
 from sdmetrics.single_column.statistical import StatisticSimilarity
 from sdmetrics.warnings import ConstantInputWarning
@@ -54,13 +55,13 @@ def test_compute_breakdown_constant_input(self):
             'score': np.nan,
         }
         expected_warn_msg = (
-            'The real data input array is constant. '
-            'The StatisticSimilarity metric is either undefined or infinte.'
+            'The real data input array is constant. The StatisticSimilarity '
+            'metric is either undefined or infinite.'
         )
 
         # Run
         metric = StatisticSimilarity()
-        with np.testing.assert_warns(ConstantInputWarning, match=expected_warn_msg):
+        with pytest.warns(ConstantInputWarning, match=expected_warn_msg):
            result = metric.compute_breakdown(real_data, synthetic_data, statistic='mean')
 
         # Assert
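
Note (not part of the patch): a minimal standalone sketch of the assertion pattern the updated tests rely on, i.e. `pytest.warns` to check a warning's class and message and `np.isclose` to compare floating-point scores within a tolerance. The `DemoWarning` class and `_score` helper below are hypothetical stand-ins for illustration only, not sdmetrics APIs.

```python
# Sketch of the test assertion style adopted in this patch.
# `DemoWarning` and `_score` are hypothetical, not part of sdmetrics.
import warnings

import numpy as np
import pytest


class DemoWarning(UserWarning):
    """Placeholder warning type for the example."""


def _score(values):
    """Return the mean of `values`, warning when the input is constant."""
    if np.all(values == values[0]):
        warnings.warn('The input array is constant.', DemoWarning)
    return float(np.mean(values))


def test_score_is_close():
    # np.isclose avoids exact float equality, which can differ slightly
    # across numpy versions and platforms.
    assert np.isclose(_score(np.array([0.4, 0.5, 0.6])), 0.5, atol=1e-8)


def test_constant_input_warns():
    # pytest.warns verifies both the warning category and, via `match`,
    # that the warning message matches the given regular expression.
    with pytest.warns(DemoWarning, match='constant'):
        _score(np.array([1.0, 1.0, 1.0]))
```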