diff --git a/tests/conftest.py b/tests/conftest.py index 2f969e3444..ad2d9d7cba 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -167,6 +167,6 @@ def datadir(tmp_path, request): dir_util.copy_tree(test_dir, str(tmp_path)) # shutil is nicer, but doesn't work: https://bugs.python.org/issue20849 # Once pyhf is Python 3.8+ only then the below can be used. - # shutil.copytree(test_dir, tmpdir) + # shutil.copytree(test_dir, tmp_path) return tmp_path diff --git a/tests/contrib/test_contrib_utils.py b/tests/contrib/test_contrib_utils.py index 5a0b69261b..4af73adf2d 100644 --- a/tests/contrib/test_contrib_utils.py +++ b/tests/contrib/test_contrib_utils.py @@ -1,6 +1,5 @@ import tarfile import zipfile -from pathlib import Path from shutil import rmtree import pytest @@ -10,70 +9,69 @@ @pytest.fixture(scope="function") -def tarfile_path(tmpdir): - with open( - tmpdir.join("test_file.txt").strpath, "w", encoding="utf-8" - ) as write_file: +def tarfile_path(tmp_path): + with open(tmp_path.joinpath("test_file.txt"), "w", encoding="utf-8") as write_file: write_file.write("test file") with tarfile.open( - tmpdir.join("test_tar.tar.gz").strpath, mode="w:gz", encoding="utf-8" + tmp_path.joinpath("test_tar.tar.gz"), mode="w:gz", encoding="utf-8" ) as archive: - archive.add(tmpdir.join("test_file.txt").strpath) - return Path(tmpdir.join("test_tar.tar.gz").strpath) + archive.add(tmp_path.joinpath("test_file.txt")) + return tmp_path.joinpath("test_tar.tar.gz") @pytest.fixture(scope="function") -def tarfile_uncompressed_path(tmpdir): - with open( - tmpdir.join("test_file.txt").strpath, "w", encoding="utf-8" - ) as write_file: +def tarfile_uncompressed_path(tmp_path): + with open(tmp_path.joinpath("test_file.txt"), "w", encoding="utf-8") as write_file: write_file.write("test file") with tarfile.open( - tmpdir.join("test_tar.tar").strpath, mode="w", encoding="utf-8" + tmp_path.joinpath("test_tar.tar"), mode="w", encoding="utf-8" ) as archive: - 
archive.add(tmpdir.join("test_file.txt").strpath) - return Path(tmpdir.join("test_tar.tar").strpath) + archive.add(tmp_path.joinpath("test_file.txt")) + return tmp_path.joinpath("test_tar.tar") @pytest.fixture(scope="function") -def zipfile_path(tmpdir): - with open( - tmpdir.join("test_file.txt").strpath, "w", encoding="utf-8" - ) as write_file: +def zipfile_path(tmp_path): + with open(tmp_path.joinpath("test_file.txt"), "w", encoding="utf-8") as write_file: write_file.write("test file") - with zipfile.ZipFile(tmpdir.join("test_zip.zip").strpath, "w") as archive: - archive.write(tmpdir.join("test_file.txt").strpath) - return Path(tmpdir.join("test_zip.zip").strpath) + with zipfile.ZipFile(tmp_path.joinpath("test_zip.zip"), "w") as archive: + archive.write(tmp_path.joinpath("test_file.txt")) + return tmp_path.joinpath("test_zip.zip") -def test_download_untrusted_archive_host(tmpdir, requests_mock): +def test_download_untrusted_archive_host(tmp_path, requests_mock): archive_url = "https://www.pyhfthisdoesnotexist.org" requests_mock.get(archive_url) with pytest.raises(InvalidArchiveHost): - download(archive_url, tmpdir.join("likelihoods").strpath) + download(archive_url, tmp_path.joinpath("likelihoods")) -def test_download_invalid_archive(tmpdir, requests_mock): +def test_download_invalid_archive(tmp_path, requests_mock): archive_url = "https://www.hepdata.net/record/resource/1408476?view=true" requests_mock.get(archive_url, status_code=404) with pytest.raises(InvalidArchive): - download(archive_url, tmpdir.join("likelihoods").strpath) + download(archive_url, tmp_path.joinpath("likelihoods")) -def test_download_compress(tmpdir, requests_mock): +def test_download_compress(tmp_path, requests_mock): archive_url = "https://www.hepdata.net/record/resource/1408476?view=true" requests_mock.get(archive_url) - download(archive_url, tmpdir.join("likelihoods").strpath, compress=True) + download(archive_url, tmp_path.joinpath("likelihoods"), compress=True) def 
test_download_archive_type( - tmpdir, mocker, requests_mock, tarfile_path, tarfile_uncompressed_path, zipfile_path + tmp_path, + mocker, + requests_mock, + tarfile_path, + tarfile_uncompressed_path, + zipfile_path, ): archive_url = "https://www.hepdata.net/record/resource/1408476?view=true" - output_directory = tmpdir.join("likelihoods").strpath + output_directory = tmp_path.joinpath("likelihoods") # Give BytesIO a tarfile requests_mock.get(archive_url, content=open(tarfile_path, "rb").read()) download(archive_url, output_directory) @@ -86,7 +84,7 @@ def test_download_archive_type( requests_mock.get(archive_url, content=open(zipfile_path, "rb").read()) # Run without and with existing output_directory to cover both # cases of the shutil.rmtree logic - rmtree(Path(output_directory)) + rmtree(output_directory) download(archive_url, output_directory) # without download(archive_url, output_directory) # with @@ -97,13 +95,13 @@ def test_download_archive_type( download(archive_url, output_directory) -def test_download_archive_force(tmpdir, requests_mock, tarfile_path): +def test_download_archive_force(tmp_path, requests_mock, tarfile_path): archive_url = "https://www.cern.ch/record/resource/123456789" requests_mock.get( archive_url, content=open(tarfile_path, "rb").read(), status_code=200 ) with pytest.raises(InvalidArchiveHost): - download(archive_url, tmpdir.join("likelihoods").strpath, force=False) + download(archive_url, tmp_path.joinpath("likelihoods"), force=False) - download(archive_url, tmpdir.join("likelihoods").strpath, force=True) + download(archive_url, tmp_path.joinpath("likelihoods"), force=True) diff --git a/tests/test_examples.py b/tests/test_examples.py index 9d4c2a1e1c..fa545726ec 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -1,7 +1,7 @@ import shlex -def test_2bin_1channel(tmpdir, script_runner): +def test_2bin_1channel(tmp_path, script_runner): command = f"pyhf inspect {'docs/examples/json/2-bin_1-channel.json':s}" ret = 
script_runner.run(shlex.split(command)) assert ret.success diff --git a/tests/test_infer.py b/tests/test_infer.py index f6a5bc6e92..0ccd072b94 100644 --- a/tests/test_infer.py +++ b/tests/test_infer.py @@ -23,7 +23,7 @@ def check_uniform_type(in_list): ) -def test_toms748_scan(tmpdir, hypotest_args): +def test_toms748_scan(tmp_path, hypotest_args): """ Test the upper limit toms748 scan returns the correct structure and values """ @@ -166,7 +166,7 @@ def test_upper_limit_with_kwargs(hypotest_args): ) -def test_mle_fit_default(tmpdir, hypotest_args): +def test_mle_fit_default(tmp_path, hypotest_args): """ Check that the default return structure of pyhf.infer.mle.fit is as expected """ @@ -180,7 +180,7 @@ def test_mle_fit_default(tmpdir, hypotest_args): assert pyhf.tensorlib.shape(result) == (model.config.npars,) -def test_mle_fit_return_fitted_val(tmpdir, hypotest_args): +def test_mle_fit_return_fitted_val(tmp_path, hypotest_args): """ Check that the return structure of pyhf.infer.mle.fit with the return_fitted_val keyword arg is as expected @@ -196,7 +196,7 @@ def test_mle_fit_return_fitted_val(tmpdir, hypotest_args): assert pyhf.tensorlib.shape(result[1]) == () -def test_hypotest_default(tmpdir, hypotest_args): +def test_hypotest_default(tmp_path, hypotest_args): """ Check that the default return structure of pyhf.infer.hypotest is as expected """ @@ -209,7 +209,7 @@ def test_hypotest_default(tmpdir, hypotest_args): assert isinstance(result, type(tb.astensor(result))) -def test_hypotest_poi_outofbounds(tmpdir, hypotest_args): +def test_hypotest_poi_outofbounds(tmp_path, hypotest_args): """ Check that the fit errors for POI outside of parameter bounds """ @@ -226,7 +226,7 @@ def test_hypotest_poi_outofbounds(tmpdir, hypotest_args): @pytest.mark.parametrize('test_stat', ['q0', 'q', 'qtilde']) -def test_hypotest_return_tail_probs(tmpdir, hypotest_args, test_stat): +def test_hypotest_return_tail_probs(tmp_path, hypotest_args, test_stat): """ Check that the return 
structure of pyhf.infer.hypotest with the return_tail_probs keyword arg is as expected @@ -243,7 +243,7 @@ def test_hypotest_return_tail_probs(tmpdir, hypotest_args, test_stat): @pytest.mark.parametrize('test_stat', ['q0', 'q', 'qtilde']) -def test_hypotest_return_expected(tmpdir, hypotest_args, test_stat): +def test_hypotest_return_expected(tmp_path, hypotest_args, test_stat): """ Check that the return structure of pyhf.infer.hypotest with the addition of the return_expected keyword arg is as expected @@ -265,7 +265,7 @@ def test_hypotest_return_expected(tmpdir, hypotest_args, test_stat): @pytest.mark.parametrize('test_stat', ['q0', 'q', 'qtilde']) -def test_hypotest_return_expected_set(tmpdir, hypotest_args, test_stat): +def test_hypotest_return_expected_set(tmp_path, hypotest_args, test_stat): """ Check that the return structure of pyhf.infer.hypotest with the addition of the return_expected_set keyword arg is as expected @@ -300,7 +300,7 @@ def test_hypotest_return_expected_set(tmpdir, hypotest_args, test_stat): @pytest.mark.parametrize('return_expected', [True, False]) @pytest.mark.parametrize('return_expected_set', [True, False]) def test_hypotest_return_calculator( - tmpdir, + tmp_path, hypotest_args, calctype, kwargs, @@ -491,7 +491,7 @@ def test_significance_to_pvalue_roundtrip(backend): assert np.allclose(sigma, back_to_sigma, atol=0, rtol=rtol) -def test_emperical_distribution(tmpdir, hypotest_args): +def test_emperical_distribution(tmp_path, hypotest_args): """ Check that the empirical distribution of the test statistic gives expected results @@ -537,7 +537,7 @@ def test_emperical_distribution(tmpdir, hypotest_args): ) -def test_toy_calculator(tmpdir, hypotest_args): +def test_toy_calculator(tmp_path, hypotest_args): """ Check that the toy calculator is performing as expected """ diff --git a/tests/test_notebooks.py b/tests/test_notebooks.py index 07b978c2ff..bc005f2201 100644 --- a/tests/test_notebooks.py +++ b/tests/test_notebooks.py @@ -11,8 +11,8 @@ 
@pytest.fixture() -def common_kwargs(tmpdir): - outputnb = tmpdir.join('output.ipynb') +def common_kwargs(tmp_path): + outputnb = tmp_path.joinpath('output.ipynb') return { 'output_path': str(outputnb), 'kernel_name': f'python{sys.version_info.major}', diff --git a/tests/test_scripts.py b/tests/test_scripts.py index 0dd88e9b8a..9d00814c8e 100644 --- a/tests/test_scripts.py +++ b/tests/test_scripts.py @@ -5,7 +5,6 @@ import tarfile import time from importlib import import_module, reload -from pathlib import Path from unittest import mock import pytest @@ -15,16 +14,14 @@ @pytest.fixture(scope="function") -def tarfile_path(tmpdir): - with open( - tmpdir.join("test_file.txt").strpath, "w", encoding="utf-8" - ) as write_file: +def tarfile_path(tmp_path): + with open(tmp_path.joinpath("test_file.txt"), "w", encoding="utf-8") as write_file: write_file.write("test file") with tarfile.open( - tmpdir.join("test_tar.tar.gz").strpath, mode="w:gz", encoding="utf-8" + tmp_path.joinpath("test_tar.tar.gz"), mode="w:gz", encoding="utf-8" ) as archive: - archive.add(tmpdir.join("test_file.txt").strpath) - return Path(tmpdir.join("test_tar.tar.gz").strpath) + archive.add(tmp_path.joinpath("test_file.txt")) + return tmp_path.joinpath("test_tar.tar.gz") def test_version(script_runner): @@ -57,29 +54,29 @@ def test_citation(script_runner, flag): # see test_import.py for the same (detailed) test -def test_import_prepHistFactory(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_import_prepHistFactory(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) assert ret.success assert ret.stdout 
== '' assert ret.stderr == '' - parsed_xml = json.loads(temp.read()) + parsed_xml = json.loads(temp.read_text()) spec = {'channels': parsed_xml['channels']} pyhf.schema.validate(spec, 'model.json') -def test_import_prepHistFactory_withProgress(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' +def test_import_prepHistFactory_withProgress(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) assert ret.success assert ret.stdout == '' assert ret.stderr != '' -def test_import_prepHistFactory_stdout(tmpdir, script_runner): +def test_import_prepHistFactory_stdout(tmp_path, script_runner): command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/' ret = script_runner.run(shlex.split(command)) assert ret.success @@ -89,12 +86,12 @@ def test_import_prepHistFactory_stdout(tmpdir, script_runner): assert d -def test_import_prepHistFactory_and_fit(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f"pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}" +def test_import_prepHistFactory_and_fit(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f"pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}" ret = script_runner.run(shlex.split(command)) - command = f"pyhf fit {temp.strpath:s}" + command = f"pyhf fit {temp}" ret = script_runner.run(shlex.split(command)) assert ret.success @@ -109,7 +106,7 @@ def test_import_prepHistFactory_and_fit(tmpdir, script_runner): 
"LogNormExample", "ConstExample", ]: - command = f"pyhf fit {temp.strpath:s} --value --measurement {measurement:s}" + command = f"pyhf fit {temp} --value --measurement {measurement:s}" ret = script_runner.run(shlex.split(command)) assert ret.success @@ -118,22 +115,22 @@ def test_import_prepHistFactory_and_fit(tmpdir, script_runner): assert "mle_parameters" in ret_json assert "twice_nll" in ret_json - tmp_out = tmpdir.join(f"{measurement:s}_output.json") + tmp_out = tmp_path.joinpath(f"{measurement:s}_output.json") # make sure output file works too - command += f" --output-file {tmp_out.strpath:s}" + command += f" --output-file {tmp_out}" ret = script_runner.run(shlex.split(command)) assert ret.success - ret_json = json.load(tmp_out) + ret_json = json.load(tmp_out.open()) assert "mle_parameters" in ret_json assert "twice_nll" in ret_json -def test_import_prepHistFactory_and_cls(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' +def test_import_prepHistFactory_and_cls(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) - command = f'pyhf cls {temp.strpath:s}' + command = f'pyhf cls {temp}' ret = script_runner.run(shlex.split(command)) assert ret.success @@ -148,7 +145,7 @@ def test_import_prepHistFactory_and_cls(tmpdir, script_runner): 'LogNormExample', 'ConstExample', ]: - command = f'pyhf cls {temp.strpath:s} --measurement {measurement:s}' + command = f'pyhf cls {temp} --measurement {measurement:s}' ret = script_runner.run(shlex.split(command)) assert ret.success @@ -157,37 +154,37 @@ def test_import_prepHistFactory_and_cls(tmpdir, script_runner): assert 'CLs_obs' in d assert 'CLs_exp' in d - tmp_out = 
tmpdir.join(f'{measurement:s}_output.json') + tmp_out = tmp_path.joinpath(f'{measurement:s}_output.json') # make sure output file works too - command += f' --output-file {tmp_out.strpath:s}' + command += f' --output-file {tmp_out}' ret = script_runner.run(shlex.split(command)) assert ret.success - d = json.load(tmp_out) + d = json.loads(tmp_out.read_text()) assert 'CLs_obs' in d assert 'CLs_exp' in d -def test_import_usingMounts(datadir, tmpdir, script_runner): +def test_import_usingMounts(datadir, tmp_path, script_runner): data = datadir.joinpath("xmlimport_absolutePaths") - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json --hide-progress -v {data}:/absolute/path/to -v {data}:/another/absolute/path/to --output-file {temp.strpath:s} {data.joinpath("config/example.xml")}' + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json --hide-progress -v {data}:/absolute/path/to -v {data}:/another/absolute/path/to --output-file {temp} {data.joinpath("config/example.xml")}' ret = script_runner.run(shlex.split(command)) assert ret.success assert ret.stdout == '' assert ret.stderr == '' - parsed_xml = json.loads(temp.read()) + parsed_xml = json.loads(temp.read_text()) spec = {'channels': parsed_xml['channels']} pyhf.schema.validate(spec, 'model.json') -def test_import_usingMounts_badDelimitedPaths(datadir, tmpdir, script_runner): +def test_import_usingMounts_badDelimitedPaths(datadir, tmp_path, script_runner): data = datadir.joinpath("xmlimport_absolutePaths") - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json --hide-progress -v {data}::/absolute/path/to -v {data}/another/absolute/path/to --output-file {temp.strpath:s} {data.joinpath("config/example.xml")}' + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json --hide-progress -v {data}::/absolute/path/to -v {data}/another/absolute/path/to --output-file {temp} {data.joinpath("config/example.xml")}' ret = script_runner.run(shlex.split(command)) assert 
not ret.success @@ -196,12 +193,12 @@ def test_import_usingMounts_badDelimitedPaths(datadir, tmpdir, script_runner): @pytest.mark.parametrize("backend", ["numpy", "tensorflow", "pytorch", "jax"]) -def test_fit_backend_option(tmpdir, script_runner, backend): - temp = tmpdir.join("parsed_output.json") - command = f"pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}" +def test_fit_backend_option(tmp_path, script_runner, backend): + temp = tmp_path.joinpath("parsed_output.json") + command = f"pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}" ret = script_runner.run(shlex.split(command)) - command = f"pyhf fit --backend {backend:s} {temp.strpath:s}" + command = f"pyhf fit --backend {backend:s} {temp}" ret = script_runner.run(shlex.split(command)) assert ret.success @@ -211,12 +208,12 @@ def test_fit_backend_option(tmpdir, script_runner, backend): @pytest.mark.parametrize("backend", ["numpy", "tensorflow", "pytorch", "jax"]) -def test_cls_backend_option(tmpdir, script_runner, backend): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' +def test_cls_backend_option(tmp_path, script_runner, backend): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) - command = f'pyhf cls --backend {backend:s} {temp.strpath:s}' + command = f'pyhf cls --backend {backend:s} {temp}' ret = script_runner.run(shlex.split(command)) assert ret.success @@ -226,86 +223,98 @@ def test_cls_backend_option(tmpdir, script_runner, backend): assert 'CLs_exp' in d -def test_import_and_export(tmpdir, script_runner): - temp = 
tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' +def test_import_and_export(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) - command = f"pyhf json2xml {temp.strpath:s} --output-dir {tmpdir.mkdir('output').strpath:s}" + output_dir_path = tmp_path / "output" + output_dir_path.mkdir() + + command = f"pyhf json2xml {temp} --output-dir {output_dir_path}" ret = script_runner.run(shlex.split(command)) assert ret.success -def test_patch(tmpdir, script_runner): - patch = tmpdir.join('patch.json') +def test_patch(tmp_path, script_runner): + patch = tmp_path.joinpath('patch.json') - patch.write( + patch.write_text( ''' [{"op": "replace", "path": "/channels/0/samples/0/data", "value": [5,6]}] ''' ) - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) - command = f'pyhf cls {temp.strpath:s} --patch {patch.strpath:s}' + command = f'pyhf cls {temp} --patch {patch}' ret = script_runner.run(shlex.split(command)) assert ret.success - command = f"pyhf json2xml {temp.strpath:s} --output-dir {tmpdir.mkdir('output_1').strpath:s} --patch {patch.strpath:s}" + output_dir_path = tmp_path / "output_1" + output_dir_path.mkdir(exist_ok=True) + + command = f"pyhf json2xml {temp} --output-dir {output_dir_path} --patch {patch}" ret = script_runner.run(shlex.split(command)) assert ret.success - command = 
f'pyhf cls {temp.strpath:s} --patch -' + command = f'pyhf cls {temp} --patch -' - ret = script_runner.run(shlex.split(command), stdin=patch) + ret = script_runner.run(shlex.split(command), stdin=patch.open()) assert ret.success - command = f"pyhf json2xml {temp.strpath:s} --output-dir {tmpdir.mkdir('output_2').strpath:s} --patch -" - ret = script_runner.run(shlex.split(command), stdin=patch) + output_dir_path = tmp_path / "output_2" + output_dir_path.mkdir(exist_ok=True) + + command = f"pyhf json2xml {temp} --output-dir {output_dir_path} --patch -" + ret = script_runner.run(shlex.split(command), stdin=patch.open()) assert ret.success -def test_patch_fail(tmpdir, script_runner): - patch = tmpdir.join('patch.json') +def test_patch_fail(tmp_path, script_runner): + patch = tmp_path.joinpath('patch.json') - patch.write('''not,json''') + patch.write_text('''not,json''') - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) - command = f'pyhf cls {temp.strpath:s} --patch {patch.strpath:s}' + command = f'pyhf cls {temp} --patch {patch}' ret = script_runner.run(shlex.split(command)) assert not ret.success - command = f"pyhf json2xml {temp.strpath:s} --output-dir {tmpdir.mkdir('output').strpath:s} --patch {patch.strpath:s}" + output_dir_path = tmp_path / "output" + output_dir_path.mkdir() + + command = f"pyhf json2xml {temp} --output-dir {output_dir_path} --patch {patch}" ret = script_runner.run(shlex.split(command)) assert not ret.success -def test_bad_measurement_name(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir 
validation/xmlimport_input/ --output-file {temp.strpath:s}' +def test_bad_measurement_name(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) - command = f'pyhf cls {temp.strpath:s} --measurement "a-fake-measurement-name"' + command = f'pyhf cls {temp} --measurement "a-fake-measurement-name"' ret = script_runner.run(shlex.split(command)) assert not ret.success # assert 'no measurement by name' in ret.stderr # numpy swallows the log.error() here, dunno why -def test_testpoi(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' +def test_testpoi(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) pois = [1.0, 0.5, 0.001] results_exp = [] results_obs = [] for test_poi in pois: - command = f'pyhf cls {temp.strpath:s} --test-poi {test_poi:f}' + command = f'pyhf cls {temp} --test-poi {test_poi:f}' ret = script_runner.run(shlex.split(command)) assert ret.success @@ -331,13 +340,13 @@ def test_testpoi(tmpdir, script_runner): @pytest.mark.parametrize( "opts,success", [(["maxiter=1000"], True), (["maxiter=1"], False)] ) -def test_fit_optimizer(tmpdir, script_runner, optimizer, opts, success): - temp = tmpdir.join("parsed_output.json") - command = f"pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}" +def test_fit_optimizer(tmp_path, script_runner, optimizer, opts, success): + temp = tmp_path.joinpath("parsed_output.json") + command = f"pyhf 
xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}" ret = script_runner.run(shlex.split(command)) optconf = " ".join(f"--optconf {opt}" for opt in opts) - command = f"pyhf fit --optimizer {optimizer} {optconf} {temp.strpath}" + command = f"pyhf fit --optimizer {optimizer} {optconf} {temp}" ret = script_runner.run(shlex.split(command)) assert ret.success == success @@ -347,39 +356,39 @@ def test_fit_optimizer(tmpdir, script_runner, optimizer, opts, success): @pytest.mark.parametrize( 'opts,success', [(['maxiter=1000'], True), (['maxiter=1'], False)] ) -def test_cls_optimizer(tmpdir, script_runner, optimizer, opts, success): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}' +def test_cls_optimizer(tmp_path, script_runner, optimizer, opts, success): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp}' ret = script_runner.run(shlex.split(command)) optconf = " ".join(f"--optconf {opt}" for opt in opts) - command = f'pyhf cls {temp.strpath} --optimizer {optimizer} {optconf}' + command = f'pyhf cls {temp} --optimizer {optimizer} {optconf}' ret = script_runner.run(shlex.split(command)) assert ret.success == success -def test_inspect(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_inspect(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - command 
= f'pyhf inspect {temp.strpath:s}' + command = f'pyhf inspect {temp}' ret = script_runner.run(shlex.split(command)) assert ret.success -def test_inspect_outfile(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_inspect_outfile(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - tempout = tmpdir.join("inspect_output.json") - command = f'pyhf inspect {temp.strpath:s} --output-file {tempout.strpath:s}' + tempout = tmp_path.joinpath("inspect_output.json") + command = f'pyhf inspect {temp} --output-file {tempout}' ret = script_runner.run(shlex.split(command)) assert ret.success - summary = json.loads(tempout.read()) + summary = json.loads(tempout.read_text()) assert [ 'channels', 'measurements', @@ -396,65 +405,63 @@ def test_inspect_outfile(tmpdir, script_runner): assert len(summary['systematics']) == 6 -def test_prune(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_prune(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - command = ( - f"pyhf prune -m staterror_channel1 --measurement GammaExample {temp.strpath:s}" - ) + command = f"pyhf prune -m staterror_channel1 --measurement GammaExample {temp}" ret = script_runner.run(shlex.split(command)) assert ret.success 
-def test_prune_outfile(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_prune_outfile(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - tempout = tmpdir.join("prune_output.json") - command = f'pyhf prune -m staterror_channel1 --measurement GammaExample {temp.strpath:s} --output-file {tempout.strpath:s}' + tempout = tmp_path.joinpath("prune_output.json") + command = f'pyhf prune -m staterror_channel1 --measurement GammaExample {temp} --output-file {tempout}' ret = script_runner.run(shlex.split(command)) assert ret.success - spec = json.loads(temp.read()) + spec = json.loads(temp.read_text()) ws = pyhf.Workspace(spec) assert 'GammaExample' in ws.measurement_names assert 'staterror_channel1' in ws.model().config.parameters - pruned_spec = json.loads(tempout.read()) + pruned_spec = json.loads(tempout.read_text()) pruned_ws = pyhf.Workspace(pruned_spec) assert 'GammaExample' not in pruned_ws.measurement_names assert 'staterror_channel1' not in pruned_ws.model().config.parameters -def test_rename(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_rename(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - command = f'pyhf rename -m staterror_channel1 
staterror_channelone --measurement GammaExample GamEx {temp.strpath:s}' + command = f'pyhf rename -m staterror_channel1 staterror_channelone --measurement GammaExample GamEx {temp}' ret = script_runner.run(shlex.split(command)) assert ret.success -def test_rename_outfile(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_rename_outfile(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - tempout = tmpdir.join("rename_output.json") - command = f'pyhf rename -m staterror_channel1 staterror_channelone --measurement GammaExample GamEx {temp.strpath:s} --output-file {tempout.strpath:s}' + tempout = tmp_path.joinpath("rename_output.json") + command = f'pyhf rename -m staterror_channel1 staterror_channelone --measurement GammaExample GamEx {temp} --output-file {tempout}' ret = script_runner.run(shlex.split(command)) assert ret.success - spec = json.loads(temp.read()) + spec = json.loads(temp.read_text()) ws = pyhf.Workspace(spec) assert 'GammaExample' in ws.measurement_names assert 'GamEx' not in ws.measurement_names assert 'staterror_channel1' in ws.model().config.parameters assert 'staterror_channelone' not in ws.model().config.parameters - renamed_spec = json.loads(tempout.read()) + renamed_spec = json.loads(tempout.read_text()) renamed_ws = pyhf.Workspace(renamed_spec) assert 'GammaExample' not in renamed_ws.measurement_names assert 'GamEx' in renamed_ws.measurement_names @@ -462,10 +469,10 @@ def test_rename_outfile(tmpdir, script_runner): assert 'staterror_channelone' in renamed_ws.model().config.parameters -def test_combine(tmpdir, script_runner): - temp_1 = 
tmpdir.join("parsed_output.json") - temp_2 = tmpdir.join("renamed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1.strpath:s} --hide-progress' +def test_combine(tmp_path, script_runner): + temp_1 = tmp_path.joinpath("parsed_output.json") + temp_2 = tmp_path.joinpath("renamed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1} --hide-progress' ret = script_runner.run(shlex.split(command)) rename_channels = {'channel1': 'channel2'} @@ -482,18 +489,18 @@ def test_combine(tmpdir, script_runner): _opts_measurements = ''.join( ' --measurement ' + ' '.join(item) for item in rename_measurements.items() ) - command = f"pyhf rename {temp_1.strpath:s} {_opts_channels:s} {_opts_measurements:s} --output-file {temp_2.strpath:s}" + command = f"pyhf rename {temp_1} {_opts_channels:s} {_opts_measurements:s} --output-file {temp_2}" ret = script_runner.run(shlex.split(command)) - command = f'pyhf combine {temp_1.strpath:s} {temp_2.strpath:s}' + command = f'pyhf combine {temp_1} {temp_2}' ret = script_runner.run(shlex.split(command)) assert ret.success -def test_combine_outfile(tmpdir, script_runner): - temp_1 = tmpdir.join("parsed_output.json") - temp_2 = tmpdir.join("renamed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1.strpath:s} --hide-progress' +def test_combine_outfile(tmp_path, script_runner): + temp_1 = tmp_path.joinpath("parsed_output.json") + temp_2 = tmp_path.joinpath("renamed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1} --hide-progress' ret = script_runner.run(shlex.split(command)) rename_channels = {'channel1': 'channel2'} @@ -510,35 +517,33 @@ def test_combine_outfile(tmpdir, 
script_runner): _opts_measurements = ''.join( ' --measurement ' + ' '.join(item) for item in rename_measurements.items() ) - command = f"pyhf rename {temp_1.strpath:s} {_opts_channels:s} {_opts_measurements:s} --output-file {temp_2.strpath:s}" + command = f"pyhf rename {temp_1} {_opts_channels:s} {_opts_measurements:s} --output-file {temp_2}" ret = script_runner.run(shlex.split(command)) - tempout = tmpdir.join("combined_output.json") - command = f'pyhf combine {temp_1.strpath:s} {temp_2.strpath:s} --output-file {tempout.strpath:s}' + tempout = tmp_path.joinpath("combined_output.json") + command = f'pyhf combine {temp_1} {temp_2} --output-file {tempout}' ret = script_runner.run(shlex.split(command)) assert ret.success - combined_spec = json.loads(tempout.read()) + combined_spec = json.loads(tempout.read_text()) combined_ws = pyhf.Workspace(combined_spec) assert combined_ws.channels == ['channel1', 'channel2'] assert len(combined_ws.measurement_names) == 8 -def test_combine_merge_channels(tmpdir, script_runner): - temp_1 = tmpdir.join("parsed_output.json") - temp_2 = tmpdir.join("renamed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1.strpath} --hide-progress' +def test_combine_merge_channels(tmp_path, script_runner): + temp_1 = tmp_path.joinpath("parsed_output.json") + temp_2 = tmp_path.joinpath("renamed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1} --hide-progress' ret = script_runner.run(shlex.split(command)) assert ret.success - command = ( - f'pyhf prune {temp_1.strpath} --sample signal --output-file {temp_2.strpath}' - ) + command = f'pyhf prune {temp_1} --sample signal --output-file {temp_2}' ret = script_runner.run(shlex.split(command)) assert ret.success - command = f'pyhf combine --merge-channels --join "left outer" {temp_1.strpath} {temp_2.strpath}' + 
command = f'pyhf combine --merge-channels --join "left outer" {temp_1} {temp_2}' ret = script_runner.run(shlex.split(command)) assert ret.success @@ -547,17 +552,19 @@ def test_combine_merge_channels(tmpdir, script_runner): @pytest.mark.parametrize( 'algorithms', [['md5'], ['sha256'], ['sha256', 'md5'], ['sha256', 'md5']] ) -def test_workspace_digest(tmpdir, script_runner, algorithms, do_json): +def test_workspace_digest(tmp_path, script_runner, algorithms, do_json): results = { 'md5': '7de8930ff37e5a4f6a31da11bda7813f', 'sha256': '6d416ee67a40460499ea2ef596fb1e682a563d7df06e690018a211d35238aecc', } - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath} --hide-progress' + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - command = f"pyhf digest {temp.strpath} -a {' -a '.join(algorithms)}{' -j' if do_json else ''}" + command = ( + f"pyhf digest {temp} -a {' -a '.join(algorithms)}{' -j' if do_json else ''}" + ) ret = script_runner.run(shlex.split(command)) assert ret.success assert all(algorithm in ret.stdout for algorithm in algorithms) @@ -588,21 +595,23 @@ def test_workspace_digest(tmpdir, script_runner, algorithms, do_json): "https://doi.org/10.17182/hepdata.89408.v1/r2", ], ) -def test_patchset_download(tmpdir, script_runner, requests_mock, tarfile_path, archive): +def test_patchset_download( + tmp_path, script_runner, requests_mock, tarfile_path, archive +): requests_mock.get(archive, content=open(tarfile_path, "rb").read()) - command = f'pyhf contrib download {archive} {tmpdir.join("likelihoods").strpath}' + command = f'pyhf contrib download {archive} {tmp_path.joinpath("likelihoods")}' ret = script_runner.run(shlex.split(command)) assert 
ret.success # Run with all optional flags - command = f'pyhf contrib download --verbose --force {archive} {tmpdir.join("likelihoods").strpath}' + command = f'pyhf contrib download --verbose --force {archive} {tmp_path.joinpath("likelihoods")}' ret = script_runner.run(shlex.split(command)) assert ret.success requests_mock.get( "https://www.pyhfthisdoesnotexist.org/record/resource/1234567", status_code=200 ) - command = f'pyhf contrib download --verbose https://www.pyhfthisdoesnotexist.org/record/resource/1234567 {tmpdir.join("likelihoods").strpath}' + command = f'pyhf contrib download --verbose https://www.pyhfthisdoesnotexist.org/record/resource/1234567 {tmp_path.joinpath("likelihoods")}' ret = script_runner.run(shlex.split(command)) assert not ret.success assert ( @@ -614,7 +623,7 @@ def test_patchset_download(tmpdir, script_runner, requests_mock, tarfile_path, a requests_mock.get( "https://httpstat.us/404/record/resource/1234567", status_code=404 ) - command = f'pyhf contrib download --verbose --force https://httpstat.us/404/record/resource/1234567 {tmpdir.join("likelihoods").strpath}' + command = f'pyhf contrib download --verbose --force https://httpstat.us/404/record/resource/1234567 {tmp_path.joinpath("likelihoods")}' ret = script_runner.run(shlex.split(command)) assert not ret.success assert "gives a response code of 404" in ret.stderr @@ -686,11 +695,11 @@ def test_patchset_inspect(datadir, script_runner): @pytest.mark.parametrize('output_file', [False, True]) @pytest.mark.parametrize('with_metadata', [False, True]) -def test_patchset_extract(datadir, tmpdir, script_runner, output_file, with_metadata): - temp = tmpdir.join("extracted_output.json") +def test_patchset_extract(datadir, tmp_path, script_runner, output_file, with_metadata): + temp = tmp_path.joinpath("extracted_output.json") command = f'pyhf patchset extract {datadir.joinpath("example_patchset.json")} --name patch_channel1_signal_syst1' if output_file: - command += f" --output-file 
{temp.strpath}" + command += f" --output-file {temp}" if with_metadata: command += " --with-metadata" @@ -698,7 +707,7 @@ def test_patchset_extract(datadir, tmpdir, script_runner, output_file, with_meta assert ret.success if output_file: - extracted_output = json.loads(temp.read()) + extracted_output = json.loads(temp.read_text()) else: extracted_output = json.loads(ret.stdout) if with_metadata: @@ -721,17 +730,17 @@ def test_patchset_verify(datadir, script_runner): @pytest.mark.parametrize('output_file', [False, True]) -def test_patchset_apply(datadir, tmpdir, script_runner, output_file): - temp = tmpdir.join("patched_output.json") +def test_patchset_apply(datadir, tmp_path, script_runner, output_file): + temp = tmp_path.joinpath("patched_output.json") command = f'pyhf patchset apply {datadir.joinpath("example_bkgonly.json")} {datadir.joinpath("example_patchset.json")} --name patch_channel1_signal_syst1' if output_file: - command += f" --output-file {temp.strpath}" + command += f" --output-file {temp}" ret = script_runner.run(shlex.split(command)) assert ret.success if output_file: - extracted_output = json.loads(temp.read()) + extracted_output = json.loads(temp.read_text()) else: extracted_output = json.loads(ret.stdout) assert extracted_output['channels'][0]['samples'][0]['modifiers'][0]['data'] == { @@ -740,24 +749,24 @@ def test_patchset_apply(datadir, tmpdir, script_runner, output_file): } -def test_sort(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_sort(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - command = f'pyhf sort {temp.strpath}' + command = 
f'pyhf sort {temp}' ret = script_runner.run(shlex.split(command)) assert ret.success -def test_sort_outfile(tmpdir, script_runner): - temp = tmpdir.join("parsed_output.json") - command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress' +def test_sort_outfile(tmp_path, script_runner): + temp = tmp_path.joinpath("parsed_output.json") + command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress' ret = script_runner.run(shlex.split(command)) - tempout = tmpdir.join("sort_output.json") - command = f'pyhf sort {temp.strpath} --output-file {tempout.strpath}' + tempout = tmp_path.joinpath("sort_output.json") + command = f'pyhf sort {temp} --output-file {tempout}' ret = script_runner.run(shlex.split(command)) assert ret.success