diff --git a/.build_rtd_docs/Doxyfile b/.build_rtd_docs/Doxyfile
index 13bdf99d1e7..1dc8033b435 100644
--- a/.build_rtd_docs/Doxyfile
+++ b/.build_rtd_docs/Doxyfile
@@ -37,7 +37,7 @@ PROJECT_NAME = "MODFLOW 6"
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = "version 6.2.2"
+PROJECT_NUMBER = "version 6.5.0.dev0"
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
diff --git a/.build_rtd_docs/conf.py b/.build_rtd_docs/conf.py
index bfd38623e25..40b117a9f22 100644
--- a/.build_rtd_docs/conf.py
+++ b/.build_rtd_docs/conf.py
@@ -56,6 +56,15 @@
# copy the file
shutil.copy(src, dst)
+# -- copy deprecations markdown ---------------------------------------------
+print("Copy the deprecations table")
+dstdir = "_mf6run"
+fpth = "deprecations.md"
+src = os.path.join("..", "doc", "mf6io", "mf6ivar", "md", fpth)
+dst = os.path.join(dstdir, fpth)
+# copy the file
+shutil.copy(src, dst)
+
# -- build the mf6io markdown files -----------------------------------------
print("Build the mf6io markdown files")
pth = os.path.join("..", "doc", "mf6io", "mf6ivar")
@@ -140,11 +149,9 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
-html_context = {
- "css_files": [
- "_static/theme_overrides.css", # override wide tables in RTD theme
- ],
-}
+html_css_files = [
+ "_static/theme_overrides.css", # override wide tables in RTD theme
+]
# html_theme_options = {
# "github_url": "https://github.com/MODFLOW-USGS/modflow6",
diff --git a/.build_rtd_docs/index.rst b/.build_rtd_docs/index.rst
index e5b8ee8cae1..813d5a4daff 100644
--- a/.build_rtd_docs/index.rst
+++ b/.build_rtd_docs/index.rst
@@ -14,4 +14,5 @@ Contents:
MODFLOW 6 Source Code Documentation
mf6io
_mf6run/run-time-comparison.md
+ _mf6run/deprecations.md
diff --git a/.build_rtd_docs/requirements.rtd.txt b/.build_rtd_docs/requirements.rtd.txt
index e531a07c623..979de2f6768 100644
--- a/.build_rtd_docs/requirements.rtd.txt
+++ b/.build_rtd_docs/requirements.rtd.txt
@@ -1,5 +1,6 @@
numpy
bmipy
+sphinx>=4
sphinx_markdown_tables
nbsphinx
nbsphinx_link
@@ -7,7 +8,7 @@ ipython
ipykernel
rtds_action
myst-parser
-sphinx_rtd_theme
+sphinx_rtd_theme>=1
pytest
filelock
modflow-devtools
\ No newline at end of file
diff --git a/.doc/conf.py b/.doc/conf.py
index 599c330ac4d..8ed07df6dda 100644
--- a/.doc/conf.py
+++ b/.doc/conf.py
@@ -112,14 +112,13 @@
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
-
html_context = {
"github_repo": "modflow6",
"doc_path": ".doc",
- "css_files": [
- "_static/theme_overrides.css", # override wide tables in RTD theme
- ],
}
+html_css_files = [
+ "_static/theme_overrides.css", # override wide tables in RTD theme
+]
html_theme_options = {}
diff --git a/.doc/requirements.txt b/.doc/requirements.txt
index 2174ec428a1..7c642ad9cda 100644
--- a/.doc/requirements.txt
+++ b/.doc/requirements.txt
@@ -2,5 +2,6 @@ sphinx_markdown_tables
ipython
ipykernel
rtds_action
-sphinx_rtd_theme
+sphinx>=4
+sphinx_rtd_theme>=1
myst-parser
diff --git a/.fprettify.yaml b/.fprettify.yaml
index c19702b116f..553ef061397 100644
--- a/.fprettify.yaml
+++ b/.fprettify.yaml
@@ -1,6 +1,6 @@
# MODFLOW 6 configuration for fprettify
# run from root directory using
-# fprettify -c distribution/.fprettify.yaml SRC
+# fprettify -c .fprettify.yaml SRC
whitespace-plusminus: 1
whitespace-multdiv: 1
line-length: 82
diff --git a/.gitattributes b/.gitattributes
index e5e837d1e7f..9134554730d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -13,3 +13,8 @@
*.png binary
*.jpg binary
*.pdf binary
+
+# Configure github-linguist
+*.inc linguist-language=Fortran
+.build_rtd_docs/** linguist-documentation
+.doc/** linguist-documentation
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index bbfd8d081ab..6d7a3d18f05 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -10,7 +10,7 @@ assignees: ''
**Describe the bug**
A clear and concise description of what the bug is.
-**To Reproduce**
+**To reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
@@ -23,6 +23,7 @@ A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
-**Desktop (please complete the following information):**
- - OS: [e.g. macOS, Linux, Windows]
- - Version [e.g. 22]
+**Environment**
+ - Operating system (e.g. macOS, Linux, Windows) and version
+ - MODFLOW 6 version (if installed via distribution)
+ - Compiler toolchain/version (if built from source)
diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
new file mode 100644
index 00000000000..1affcbe0ed5
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
@@ -0,0 +1,8 @@
+Feel free to remove check-list items that aren't relevant to your change.
+
+- [ ] Closes #xxxx
+- [ ] Passed autotests
+- [ ] Formatted source files with `fprettify`
+- [ ] Updated definition (*.dfn) files with new or modified options
+- [ ] Described new options, features, or behavior changes in release notes
+- [ ] Updated meson files, makefiles, and Visual Studio project files if new source files were added
\ No newline at end of file
diff --git a/.github/common/fortran_format_check.py b/.github/common/fortran_format_check.py
index 8452e5a69c6..a2c6eb1411e 100644
--- a/.github/common/fortran_format_check.py
+++ b/.github/common/fortran_format_check.py
@@ -18,7 +18,8 @@
]
# Exclude these files from checks
-excludefiles = ["src/Utilities/InputOutput.f90"] # excluded until refactored
+excludefiles = [] # add excluded files here
+
class FortranFormatCheck:
"""
@@ -99,6 +100,7 @@ def _excluded(self, path: Path) -> bool:
return False
+
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"MODFLOW 6 fortran format source code verification"
@@ -106,9 +108,7 @@ def _excluded(self, path: Path) -> bool:
parser.add_argument(
"-r", "--root", help="path to MODFLOW 6 repository root directory"
)
- parser.add_argument(
- "-v", "--verbose", action="store_true", help="verbose"
- )
+ parser.add_argument("-v", "--verbose", action="store_true", help="verbose")
args = parser.parse_args()
# set MODFLOW 6 repository root
diff --git a/.github/common/msvs_vfproj_check.py b/.github/common/msvs_vfproj_check.py
new file mode 100644
index 00000000000..80c1f3d719d
--- /dev/null
+++ b/.github/common/msvs_vfproj_check.py
@@ -0,0 +1,68 @@
+import xml.etree.ElementTree as ET
+from pathlib import Path
+
+
+def get_source_files(src_folder):
+ p = Path(".")
+ src_files = []
+ print(f"Processing {src_folder} folder")
+ ftypes = ("*.[fF]9[05]", "*.inc")
+ src_files = []
+ for ft in ftypes:
+ src_files.extend(p.glob(f"{src_folder}/**/{ft}"))
+ return src_files
+
+
+def get_msvs_files(vfproj_file):
+ print(f"Processing {vfproj_file}")
+ tree = ET.parse(vfproj_file)
+ root = tree.getroot()
+ msvs_files = []
+ for f in root.iter("File"):
+ s = f.attrib["RelativePath"]
+ s = s.replace("\\", "/")
+ s = s.replace("../", "")
+ fpath = Path(s)
+ msvs_files.append(fpath)
+ return msvs_files
+
+
+def check_files(name, src_files, msvs_files):
+ print(
+ f"Verifying {name} files referenced in msvs project files are in src folder..."
+ )
+ s, m = set(src_files), set(msvs_files)
+ diff = s ^ m
+ from pprint import pformat
+
+ assert not any(diff), (
+ f"{name} src files don't match msvs project file\n"
+ f"=> symmetric difference:\n{pformat(diff)}\n"
+ f"=> src - msvs:\n{pformat(s - m)}\n"
+ f"=> msvs - src:\n{pformat(m - s)}\n"
+ "Check to make sure msvs project file is consistent with source files."
+ )
+
+
+def check_mf6():
+ # get list of source files and files referenced in msvs project files
+ src_files = get_source_files("src")
+ msvs_files = []
+ for vfproj in ["./msvs/mf6core.vfproj", "./msvs/mf6.vfproj"]:
+ msvs_files.extend(get_msvs_files(vfproj))
+ check_files("MF6", src_files, msvs_files)
+
+
+def check_bmi():
+ # get list of source files and files referenced in msvs project files
+ src_files = get_source_files("srcbmi")
+ msvs_files = []
+ for vfproj in ["./msvs/mf6bmi.vfproj"]:
+ msvs_files.extend(get_msvs_files(vfproj))
+ check_files("BMI", src_files, msvs_files)
+
+
+if __name__ == "__main__":
+ check_mf6()
+ check_bmi()
+ print("msvs project (vfproj) files appear up-to-date...")
diff --git a/.github/common/update_compat_tables.py b/.github/common/update_compat_tables.py
new file mode 100644
index 00000000000..59255741806
--- /dev/null
+++ b/.github/common/update_compat_tables.py
@@ -0,0 +1,35 @@
+"""
+Inserts Markdown compatibility tables
+between tags in target Markdown file.
+"""
+
+import re
+import sys
+from pathlib import Path
+
+name = sys.argv[1] # name of the table, e.g. "compile", "test"
+compat_path = Path(sys.argv[2]) # compatibility table path
+update_path = Path(sys.argv[3]) # path to file to update
+
+assert compat_path.is_file()
+assert update_path.is_file()
+
+with open(compat_path, "r") as compat:
+ table = "".join(compat.readlines())
+    r = re.compile(
+        rf"<!-- {name} compat starts -->.*<!-- {name} compat ends -->",
+        re.DOTALL,
+    )
+    ct = (
+        f"<!-- {name} compat starts -->\n{table}\n<!-- {name} compat ends -->"
+    )
+ readme = update_path.open().read()
+ update_path.open("w").write(r.sub(ct, readme))
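
The compilers workflow below invokes this script as, for example, python .github/common/update_compat_tables.py "compile" ".github/compat/comp.md" "DEVELOPER.md". A minimal sketch of the tag-delimited substitution it performs, assuming HTML-comment markers of the form "<!-- <name> compat starts/ends -->":

    import re

    name = "compile"
    table = "| runner | gcc 13 |\n| --- | --- |\n| ubuntu-22.04 | x |"  # made-up table
    target = "intro\n<!-- compile compat starts -->\nold\n<!-- compile compat ends -->\noutro"

    pattern = re.compile(
        rf"<!-- {name} compat starts -->.*<!-- {name} compat ends -->", re.DOTALL
    )
    replacement = f"<!-- {name} compat starts -->\n{table}\n<!-- {name} compat ends -->"
    print(pattern.sub(replacement, target))  # the old block is replaced with the new table
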
diff --git a/.github/common/wide_compat_reports.py b/.github/common/wide_compat_reports.py
new file mode 100644
index 00000000000..f2ffb531ba1
--- /dev/null
+++ b/.github/common/wide_compat_reports.py
@@ -0,0 +1,39 @@
+"""
+Converts compatibility reports from long to wide format
+and makes a markdown table from the wide format report.
+"""
+
+from pathlib import Path
+import pandas as pd
+import sys
+
+ip = Path(sys.argv[1]) # input file path
+op = Path(sys.argv[2]) # output file path
+assert ip.is_file()
+assert ip.suffix == ".csv"
+assert op.suffix == ".csv"
+
+# read long CSV
+df = pd.read_csv(ip)
+
+# pivot and sort
+df = pd.pivot(
+ df,
+ index="runner",
+ columns=["compiler", "version"],
+ values="support",
+).sort_values(by=["runner"])
+
+# write wide CSV
+df.to_csv(op)
+
+# write wide markdown table
+with open(op.with_suffix(".md"), "w") as file:
+ file.write(
+ df.to_markdown()
+ .replace("nan", "")
+ .replace("(", "")
+ .replace(")", "")
+ .replace(",", "")
+ .replace("'", "")
+ )
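
A minimal sketch of the long-to-wide pivot wide_compat_reports.py performs, using hypothetical rows shaped like the per-job report lines (runner, compiler, version, support):

    import pandas as pd

    # hypothetical long-format rows; the real file is concatenated from per-job CSVs
    long = pd.DataFrame(
        [
            {"runner": "ubuntu-22.04", "compiler": "gcc", "version": "13", "support": "✓"},
            {"runner": "windows-2022", "compiler": "gcc", "version": "13", "support": "✓"},
            {"runner": "ubuntu-22.04", "compiler": "intel", "version": "2023.2", "support": ""},
        ]
    )
    wide = pd.pivot(
        long, index="runner", columns=["compiler", "version"], values="support"
    ).sort_values(by=["runner"])
    print(wide.to_markdown())  # one row per runner, one column per (compiler, version) pair
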
diff --git a/.github/compat/comp.csv b/.github/compat/comp.csv
new file mode 100644
index 00000000000..32e0a1b4c31
--- /dev/null
+++ b/.github/compat/comp.csv
@@ -0,0 +1,9 @@
+compiler,gcc,gcc,gcc,gcc,gcc,gcc,gcc,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel,intel,intel,intel,intel,intel,intel,intel,intel,intel
+version,10,11,12,13,7,8,9,2021.1,2021.10,2021.2,2021.3,2021.4,2021.5,2021.6,2021.7,2021.8,2021.9,2021.1,2021.2,2021.4,2022.0,2022.1,2022.2.1,2022.2,2023.0,2023.1,2023.2
+runner,,,,,,,,,,,,,,,,,,,,,,,,,,,
+macos-11,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,,,,,,,,,,
+macos-12,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,✓,,,,,,,,,,
+ubuntu-20.04,✓,✓,,,✓,✓,✓,✓,✓,✓,,✓,✓,✓,✓,✓,✓,,,,,,✓,✓,,,✓
+ubuntu-22.04,✓,✓,✓,✓,,,✓,✓,✓,✓,,✓,✓,✓,✓,✓,✓,,,,,,✓,✓,,,✓
+windows-2019,✓,✓,✓,✓,,,✓,,✓,,,,,✓,✓,✓,✓,,,,,,,✓,,,✓
+windows-2022,✓,✓,✓,✓,,,✓,,✓,,,,,✓,✓,✓,✓,,,,,,,✓,,,✓
diff --git a/.github/compat/test.csv b/.github/compat/test.csv
new file mode 100644
index 00000000000..a029341ceef
--- /dev/null
+++ b/.github/compat/test.csv
@@ -0,0 +1,9 @@
+compiler,gcc,gcc,gcc,gcc,gcc,gcc,gcc,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel-classic,intel,intel,intel,intel,intel,intel,intel,intel,intel,intel
+version,10,11,12,13,7,8,9,2021.1,2021.10,2021.2,2021.3,2021.4,2021.5,2021.6,2021.7,2021.8,2021.9,2021.1,2021.2,2021.4,2022.0,2022.1,2022.2.1,2022.2,2023.0,2023.1,2023.2
+runner,,,,,,,,,,,,,,,,,,,,,,,,,,,
+macos-11,✓,✓,✓,✓,✓,✓,✓,✓,,✓,✓,✓,✓,✓,✓,,,,,,,,,,,,
+macos-12,✓,✓,✓,✓,,,,✓,,✓,✓,✓,✓,✓,✓,,,,,,,,,,,,
+ubuntu-20.04,✓,✓,,,✓,✓,✓,✓,,✓,,✓,✓,✓,✓,,,,,,,,,,,,
+ubuntu-22.04,✓,✓,✓,✓,,,✓,✓,,✓,,✓,✓,✓,✓,,,,,,,,,,,,
+windows-2019,,,,✓,,,,,,,,,,✓,✓,,,,,,,,,,,,
+windows-2022,✓,✓,✓,✓,,,✓,,,,,,,✓,✓,,,,,,,,,,,,
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 75c7b43d4b6..b0333674e46 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,23 +1,43 @@
name: MODFLOW 6 continuous integration
on:
push:
- branches:
- - master
- - develop
- - ci-diagnose*
paths-ignore:
- '**.md'
- - 'doc/**'
+ - '**.pdf'
+ - '**.tex'
+ - '**.jpg'
+ - '**.jpeg'
+ - '**.png'
+ - '**.bbl'
+ - '**.bib'
+ - 'doc/**.dat'
+ - 'doc/**.ipynb'
+ - 'doc/**.py'
+ - 'doc/**.sh'
+ - 'doc/**.xlsx'
+ - '.hpc/**'
pull_request:
branches:
- master
- develop
paths-ignore:
- '**.md'
- - 'doc/**'
+ - '**.pdf'
+ - '**.tex'
+ - '**.jpg'
+ - '**.jpeg'
+ - '**.png'
+ - '**.bbl'
+ - '**.bib'
+ - 'doc/**.dat'
+ - 'doc/**.ipynb'
+ - 'doc/**.py'
+ - 'doc/**.sh'
+ - 'doc/**.xlsx'
+ - '.hpc/**'
jobs:
lint:
- name: Lint (fprettify)
+ name: Check format
runs-on: ubuntu-latest
defaults:
run:
@@ -25,7 +45,7 @@ jobs:
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Micromamba
uses: mamba-org/setup-micromamba@v1
@@ -37,22 +57,25 @@ jobs:
- name: Check Fortran source formatting
run: python .github/common/fortran_format_check.py
+ - name: Check msvs project files
+ run: python .github/common/msvs_vfproj_check.py
+
build:
- name: Build (gfortran 12)
+ name: Build
runs-on: ubuntu-22.04
defaults:
run:
shell: bash -l {0}
env:
FC: gfortran
- GCC_V: 12
+ GCC_V: 13
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup gfortran ${{ env.GCC_V }}
- uses: awvwgk/setup-fortran@main
+ uses: fortran-lang/setup-fortran@v1
with:
compiler: gcc
version: ${{ env.GCC_V }}
@@ -70,29 +93,39 @@ jobs:
- name: Meson compile
run: meson compile -C builddir
+ - name: Show build log
+ if: failure()
+ run: cat builddir/meson-logs/meson-log.txt
+
- name: Meson test
run: meson test --verbose --no-rebuild -C builddir
smoke_test:
- name: Smoke test (gfortran 12)
+ name: Smoke test
runs-on: ubuntu-22.04
defaults:
run:
shell: bash -l {0}
env:
FC: gfortran
- GCC_V: 12
+ GCC: 13
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: modflow6
+
+ - name: Checkout test-drive
+ uses: actions/checkout@v4
+ with:
+ repository: fortran-lang/test-drive
+ path: test-drive
- - name: Setup GNU Fortran ${{ env.GCC_V }}
- uses: awvwgk/setup-fortran@main
+ - name: Setup GNU Fortran ${{ env.GCC }}
+ uses: fortran-lang/setup-fortran@v1
with:
compiler: gcc
- version: ${{ env.GCC_V }}
+ version: ${{ env.GCC }}
- name: Setup Micromamba
uses: mamba-org/setup-micromamba@v1
@@ -101,12 +134,27 @@ jobs:
cache-environment: true
cache-downloads: true
+ - name: Build test-drive
+ working-directory: test-drive
+ run: |
+ meson setup builddir --prefix=$(pwd) --libdir=lib
+ meson install -C builddir
+ echo "PKG_CONFIG_PATH=$(pwd)/lib/pkgconfig:$PKG_CONFIG_PATH" >> $GITHUB_ENV
+
- name: Build modflow6
working-directory: modflow6
run: |
meson setup builddir -Ddebug=false --prefix=$(pwd) --libdir=bin
meson install -C builddir
- meson test --verbose --no-rebuild -C builddir
+
+ - name: Show build log
+ if: failure()
+ working-directory: modflow6
+ run: cat builddir/meson-logs/meson-log.txt
+
+ - name: Unit test programs
+ working-directory: modflow6
+ run: meson test --verbose --no-rebuild -C builddir
- name: Update flopy
working-directory: modflow6/autotest
@@ -127,8 +175,8 @@ jobs:
pytest -v -n auto --durations 0 -S
fi
- test_gfortran_latest:
- name: Test (gfortran 12)
+ test_gfortran:
+ name: Test gnu fortran
needs:
- lint
- build
@@ -143,30 +191,30 @@ jobs:
shell: bash -l {0}
env:
FC: gfortran
- GCC_V: 12
+ GCC: 13
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: modflow6
- name: Checkout modflow6-testmodels
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-testmodels
path: modflow6-testmodels
- name: Checkout modflow6-examples
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-examples
path: modflow6-examples
- - name: Setup GNU Fortran ${{ env.GCC_V }}
- uses: awvwgk/setup-fortran@main
+ - name: Setup GNU Fortran ${{ env.GCC }}
+ uses: fortran-lang/setup-fortran@v1
with:
compiler: gcc
- version: ${{ env.GCC_V }}
+ version: ${{ env.GCC }}
- name: Setup Micromamba
uses: mamba-org/setup-micromamba@v1
@@ -183,7 +231,15 @@ jobs:
run: |
meson setup builddir -Ddebug=false --prefix=$(pwd) --libdir=bin
meson install -C builddir
- meson test --verbose --no-rebuild -C builddir
+
+ - name: Show build log
+ if: failure()
+ working-directory: modflow6
+ run: cat builddir/meson-logs/meson-log.txt
+
+ - name: Unit test programs
+ working-directory: modflow6
+ run: meson test --verbose --no-rebuild -C builddir
- name: Update flopy
working-directory: modflow6/autotest
@@ -207,11 +263,9 @@ jobs:
pytest -v -n auto --durations 0 -m "not large"
fi
- # steps below run only on Linux to test distribution procedures, e.g.
- # compiling binaries, building documentation
- name: Checkout usgslatex
if: runner.os == 'Linux'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/usgslatex
path: usgslatex
@@ -230,31 +284,15 @@ jobs:
if: runner.os == 'Linux'
working-directory: usgslatex/usgsLaTeX
run: sudo ./install.sh --all-users
-
- - name: Install dependencies for ex-gwf-twri example model
- if: runner.os == 'Linux'
- working-directory: modflow6-examples/etc
- run: |
- # install extra Python packages
- pip install -r requirements.pip.txt
-
- # the example model needs executables to be on the path
- echo "${{ github.workspace }}/modflow6/bin" >> $GITHUB_PATH
- echo "${{ github.workspace }}/modflow6/bin/downloaded" >> $GITHUB_PATH
-
- - name: Build ex-gwf-twri example model
- if: runner.os == 'Linux'
- working-directory: modflow6-examples/scripts
- run: python ex-gwf-twri.py
-
+
- name: Test distribution scripts
working-directory: modflow6/distribution
env:
GITHUB_TOKEN: ${{ github.token }}
run: pytest -v --durations 0
- test_gfortran_previous:
- name: Test gfortran (${{ matrix.GCC_V }}, ${{ matrix.os }})
+ test_intel_fortran:
+ name: Test intel fortran
needs:
- lint
- build
@@ -263,90 +301,23 @@ jobs:
strategy:
fail-fast: false
matrix:
- os: [ ubuntu-20.04 ]
- GCC_V: [ 7, 8, 9, 10, 11 ]
- defaults:
- run:
- shell: bash -l {0}
- env:
- FC: gfortran
- steps:
-
- - name: Checkout modflow6
- uses: actions/checkout@v3
- with:
- path: modflow6
-
- - name: Checkout modflow6-testmodels
- uses: actions/checkout@v3
- with:
- repository: MODFLOW-USGS/modflow6-testmodels
- path: modflow6-testmodels
-
- - name: Setup GNU Fortran ${{ matrix.GCC_V }}
- uses: awvwgk/setup-fortran@main
- with:
- compiler: gcc
- version: ${{ matrix.GCC_V }}
-
- - name: Setup Micromamba
- uses: mamba-org/setup-micromamba@v1
- with:
- environment-file: modflow6/environment.yml
- cache-downloads: true
- cache-environment: true
-
- - name: Update flopy
- working-directory: modflow6/autotest
- run: python update_flopy.py
-
- - name: Build modflow6
- working-directory: modflow6
- run: |
- meson setup builddir -Ddebug=false --prefix=$(pwd) --libdir=bin
- meson install -C builddir
- meson test --verbose --no-rebuild -C builddir
-
- - name: Get executables
- working-directory: modflow6/autotest
- env:
- GITHUB_TOKEN: ${{ github.token }}
- run: pytest -v --durations 0 get_exes.py
-
- - name: Test modflow6
- working-directory: modflow6/autotest
- env:
- REPOS_PATH: ${{ github.workspace }}
- run: |
- if [ "${{ github.ref_name }}" == "master" ]; then
- pytest -v -n auto --durations 0 -m "not large and not developmode"
- else
- pytest -v -n auto --durations 0 -m "not large"
- fi
+ include:
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.7}
+ - {os: macos-12, compiler: intel-classic, version: 2021.7}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.7}
- test_ifort:
- name: Test (ifort)
- needs:
- - lint
- - build
- - smoke_test
- runs-on: ${{ matrix.os }}
- strategy:
- fail-fast: false
- matrix:
- os: [ ubuntu-latest, macos-latest, windows-latest ]
defaults:
run:
shell: bash -l {0}
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: modflow6
- name: Checkout modflow6-testmodels
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-testmodels
path: modflow6-testmodels
@@ -362,50 +333,41 @@ jobs:
cache-downloads: true
- name: Setup Intel Fortran
- uses: modflowpy/install-intelfortran-action@v1
+ uses: fortran-lang/setup-fortran@v1
+ with:
+ compiler: ${{ matrix.compiler }}
+ version: ${{ matrix.version }}
- name: Update version files
working-directory: modflow6/distribution
run: python update_version.py
- name: Build modflow6
- if: runner.os != 'Windows'
working-directory: modflow6
run: |
meson setup builddir -Ddebug=false --prefix=$(pwd) --libdir=bin
meson install -C builddir
- meson test --verbose --no-rebuild -C builddir
- - name: Build modflow6 (Windows)
- if: runner.os == 'Windows'
+ - name: Show build log
+ if: failure()
working-directory: modflow6
- shell: pwsh
- run: |
- meson setup builddir -Ddebug=false --prefix=$(pwd) --libdir=bin
- meson install -C builddir
- meson test --verbose --no-rebuild -C builddir
+ run: cat builddir/meson-logs/meson-log.txt
+
+ - name: Unit test programs
+ working-directory: modflow6
+ run: meson test --verbose --no-rebuild -C builddir
- name: Update flopy
working-directory: modflow6/autotest
run: python update_flopy.py
- name: Get executables
- if: runner.os != 'Windows'
working-directory: modflow6/autotest
env:
GITHUB_TOKEN: ${{ github.token }}
run: pytest -v --durations 0 get_exes.py
-
- - name: Get executables (Windows)
- if: runner.os == 'Windows'
- working-directory: modflow6/autotest
- shell: pwsh
- env:
- GITHUB_TOKEN: ${{ github.token }}
- run: pytest -v --durations 0 get_exes.py
- name: Test programs
- if: runner.os != 'Windows'
working-directory: modflow6/autotest
env:
REPOS_PATH: ${{ github.workspace }}
@@ -416,30 +378,8 @@ jobs:
pytest -v -n auto --durations 0 -m "not large"
fi
- - name: Test programs (Windows)
- if: runner.os == 'Windows'
- working-directory: modflow6/autotest
- shell: pwsh
- env:
- REPOS_PATH: ${{ github.workspace }}
- run: |
- if ( "${{ github.ref_name }}" -eq "master" ) {
- pytest -v -n auto --durations 0 -m "not large and not developmode"
- } else {
- pytest -v -n auto --durations 0 -m "not large"
- }
-
- name: Test scripts
- if: runner.os != 'Windows'
- working-directory: modflow6/distribution
- env:
- GITHUB_TOKEN: ${{ github.token }}
- run: pytest -v --durations 0
-
- - name: Test scripts (Windows)
- if: runner.os == 'Windows'
working-directory: modflow6/distribution
- shell: pwsh
env:
GITHUB_TOKEN: ${{ github.token }}
run: pytest -v --durations 0
@@ -464,7 +404,7 @@ jobs:
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: modflow6
@@ -488,14 +428,14 @@ jobs:
mpi: msmpi
- name: Setup GNU Fortran ${{ env.GCC_V }}
- uses: awvwgk/setup-fortran@main
+ uses: fortran-lang/setup-fortran@v1
with:
compiler: gcc
version: ${{ env.GCC_V }}
- name: Cache PETSc
id: cache-petsc
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: petsc
key: ${{ runner.os }}-petsc
diff --git a/.github/workflows/compilers.yml b/.github/workflows/compilers.yml
new file mode 100644
index 00000000000..e3fcd7dd85b
--- /dev/null
+++ b/.github/workflows/compilers.yml
@@ -0,0 +1,366 @@
+name: MODFLOW 6 compiler checks
+on:
+ push:
+ branches:
+ - v[0-9]+.[0-9]+.[0-9]+*
+ - master
+ pull_request:
+ branches:
+ - master
+ schedule:
+ - cron: 0 0 * * 0 # 12am utc every sunday
+ # workflow_dispatch trigger to start release via GitHub UI or CLI, see
+ # https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow
+ workflow_dispatch:
+jobs:
+ test:
+ name: Test
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ # combinations from https://github.com/fortran-lang/setup-fortran#runner-compatibility
+ include:
+ # gfortran
+ - {os: ubuntu-20.04, compiler: gcc, version: 7}
+ - {os: ubuntu-20.04, compiler: gcc, version: 8}
+ - {os: ubuntu-20.04, compiler: gcc, version: 9}
+ - {os: ubuntu-20.04, compiler: gcc, version: 10}
+ - {os: ubuntu-20.04, compiler: gcc, version: 11}
+ - {os: ubuntu-22.04, compiler: gcc, version: 9}
+ - {os: ubuntu-22.04, compiler: gcc, version: 10}
+ - {os: ubuntu-22.04, compiler: gcc, version: 11}
+ - {os: ubuntu-22.04, compiler: gcc, version: 12}
+ - {os: ubuntu-22.04, compiler: gcc, version: 13}
+ - {os: macos-11, compiler: gcc, version: 7}
+ - {os: macos-11, compiler: gcc, version: 8}
+ - {os: macos-11, compiler: gcc, version: 9}
+ - {os: macos-11, compiler: gcc, version: 10}
+ - {os: macos-11, compiler: gcc, version: 11}
+ - {os: macos-11, compiler: gcc, version: 12}
+ - {os: macos-11, compiler: gcc, version: 13}
+ - {os: macos-12, compiler: gcc, version: 7}
+ - {os: macos-12, compiler: gcc, version: 8}
+ - {os: macos-12, compiler: gcc, version: 9}
+ - {os: macos-12, compiler: gcc, version: 10}
+ - {os: macos-12, compiler: gcc, version: 11}
+ - {os: macos-12, compiler: gcc, version: 12}
+ - {os: macos-12, compiler: gcc, version: 13}
+ - {os: windows-2019, compiler: gcc, version: 9}
+ - {os: windows-2019, compiler: gcc, version: 10}
+ - {os: windows-2019, compiler: gcc, version: 11}
+ - {os: windows-2019, compiler: gcc, version: 12}
+ - {os: windows-2019, compiler: gcc, version: 13}
+ - {os: windows-2022, compiler: gcc, version: 9}
+ - {os: windows-2022, compiler: gcc, version: 10}
+ - {os: windows-2022, compiler: gcc, version: 11}
+ - {os: windows-2022, compiler: gcc, version: 12}
+ - {os: windows-2022, compiler: gcc, version: 13}
+ # ifx
+ - {os: ubuntu-20.04, compiler: intel, version: 2023.2}
+ - {os: ubuntu-20.04, compiler: intel, version: 2023.1}
+ - {os: ubuntu-20.04, compiler: intel, version: "2023.0"}
+ - {os: ubuntu-20.04, compiler: intel, version: 2022.2.1}
+ - {os: ubuntu-20.04, compiler: intel, version: 2022.2}
+ - {os: ubuntu-20.04, compiler: intel, version: 2022.1}
+ - {os: ubuntu-20.04, compiler: intel, version: "2022.0"}
+ - {os: ubuntu-20.04, compiler: intel, version: 2021.4}
+ - {os: ubuntu-20.04, compiler: intel, version: 2021.2}
+ - {os: ubuntu-20.04, compiler: intel, version: 2021.1}
+ - {os: ubuntu-22.04, compiler: intel, version: 2023.2}
+ - {os: ubuntu-22.04, compiler: intel, version: 2023.1}
+ - {os: ubuntu-22.04, compiler: intel, version: "2023.0"}
+ - {os: ubuntu-22.04, compiler: intel, version: 2022.2.1}
+ - {os: ubuntu-22.04, compiler: intel, version: 2022.2}
+ - {os: ubuntu-22.04, compiler: intel, version: 2022.1}
+ - {os: ubuntu-22.04, compiler: intel, version: "2022.0"}
+ - {os: ubuntu-22.04, compiler: intel, version: 2021.4}
+ - {os: ubuntu-22.04, compiler: intel, version: 2021.2}
+ - {os: ubuntu-22.04, compiler: intel, version: 2021.1}
+ # no ifx on mac
+ - {os: windows-2019, compiler: intel, version: 2023.2}
+ - {os: windows-2019, compiler: intel, version: 2023.1}
+ - {os: windows-2019, compiler: intel, version: "2023.0"}
+ - {os: windows-2019, compiler: intel, version: 2022.2}
+ - {os: windows-2019, compiler: intel, version: 2022.1}
+ - {os: windows-2022, compiler: intel, version: 2023.2}
+ - {os: windows-2022, compiler: intel, version: 2023.1}
+ - {os: windows-2022, compiler: intel, version: "2023.0"}
+ - {os: windows-2022, compiler: intel, version: 2022.2}
+ - {os: windows-2022, compiler: intel, version: 2022.1}
+ # ifort
+ - {os: ubuntu-20.04, compiler: intel-classic, version: "2021.10"}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.9}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.8}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.7}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.6}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.5}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.4}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.3}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.2}
+ - {os: ubuntu-20.04, compiler: intel-classic, version: 2021.1}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: "2021.10"}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.9}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.8}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.7}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.6}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.5}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.4}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.3}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.2}
+ - {os: ubuntu-22.04, compiler: intel-classic, version: 2021.1}
+ - {os: macos-11, compiler: intel-classic, version: "2021.10"}
+ - {os: macos-11, compiler: intel-classic, version: 2021.9}
+ - {os: macos-11, compiler: intel-classic, version: 2021.8}
+ - {os: macos-11, compiler: intel-classic, version: 2021.7}
+ - {os: macos-11, compiler: intel-classic, version: 2021.6}
+ - {os: macos-11, compiler: intel-classic, version: 2021.5}
+ - {os: macos-11, compiler: intel-classic, version: 2021.4}
+ - {os: macos-11, compiler: intel-classic, version: 2021.3}
+ - {os: macos-11, compiler: intel-classic, version: 2021.2}
+ - {os: macos-11, compiler: intel-classic, version: 2021.1}
+ - {os: macos-12, compiler: intel-classic, version: "2021.10"}
+ - {os: macos-12, compiler: intel-classic, version: 2021.9}
+ - {os: macos-12, compiler: intel-classic, version: 2021.8}
+ - {os: macos-12, compiler: intel-classic, version: 2021.7}
+ - {os: macos-12, compiler: intel-classic, version: 2021.6}
+ - {os: macos-12, compiler: intel-classic, version: 2021.5}
+ - {os: macos-12, compiler: intel-classic, version: 2021.4}
+ - {os: macos-12, compiler: intel-classic, version: 2021.3}
+ - {os: macos-12, compiler: intel-classic, version: 2021.2}
+ - {os: macos-12, compiler: intel-classic, version: 2021.1}
+ - {os: windows-2019, compiler: intel-classic, version: "2021.10"}
+ - {os: windows-2019, compiler: intel-classic, version: 2021.9}
+ - {os: windows-2019, compiler: intel-classic, version: 2021.8}
+ - {os: windows-2019, compiler: intel-classic, version: 2021.7}
+ - {os: windows-2019, compiler: intel-classic, version: 2021.6}
+ - {os: windows-2022, compiler: intel-classic, version: "2021.10"}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.9}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.8}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.7}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.6}
+ defaults:
+ run:
+ shell: bash -l {0}
+ steps:
+ - name: Checkout modflow6
+ uses: actions/checkout@v4
+ with:
+ path: modflow6
+
+ - name: Checkout modflow6-testmodels
+ uses: actions/checkout@v4
+ with:
+ repository: MODFLOW-USGS/modflow6-testmodels
+ path: modflow6-testmodels
+
+ - name: Checkout modflow6-examples
+ uses: actions/checkout@v4
+ with:
+ repository: MODFLOW-USGS/modflow6-examples
+ path: modflow6-examples
+
+ - name: Setup ${{ matrix.compiler }} ${{ matrix.version }}
+ uses: fortran-lang/setup-fortran@v1
+ with:
+ compiler: ${{ matrix.compiler}}
+ version: ${{ matrix.version }}
+
+ - name: Setup Micromamba
+ uses: mamba-org/setup-micromamba@v1
+ with:
+ environment-file: modflow6/environment.yml
+ init-shell: >-
+ bash
+ powershell
+ cache-environment: true
+ cache-downloads: true
+
+ - name: Build modflow6
+ working-directory: modflow6
+ run: |
+ meson setup builddir -Ddebug=false --prefix=$(pwd) --libdir=bin
+ meson install -C builddir
+
+ - name: Show build log
+ if: failure()
+ working-directory: modflow6
+ run: cat builddir/meson-logs/meson-log.txt
+
+ - name: Upload build log
+ if: failure()
+ uses: actions/upload-artifact@v3
+ with:
+ name: meson-log.txt
+ path: modflow6/builddir/meson-logs/meson-log.txt
+
+ - name: Unit test programs
+ if: success()
+ working-directory: modflow6
+ run: meson test --verbose --no-rebuild -C builddir
+
+ - name: Create compile report
+ if: success() || failure()
+ shell: bash
+ run: |
+ if [[ "${{ job.status }}" == "success" ]]; then
+ support="✓"
+ else
+ support=""
+ fi
+
+ mkdir -p compat
+ prefix="${{ matrix.os }},${{ matrix.compiler }},${{ matrix.version }}"
+ echo "$prefix,$support" >> "compat/comp_${prefix//,/_}.csv"
+
+ - name: Update flopy
+ if: success()
+ working-directory: modflow6/autotest
+ run: python update_flopy.py
+
+ - name: Get executables
+ if: success()
+ working-directory: modflow6/autotest
+ env:
+ GITHUB_TOKEN: ${{ github.token }}
+ run: pytest -v --durations 0 get_exes.py
+
+ - name: Test modflow6
+ if: success()
+ working-directory: modflow6/autotest
+ env:
+ REPOS_PATH: ${{ github.workspace }}
+ run: pytest -v -n auto --durations 0
+
+ - name: Create test report
+ if: success() || failure()
+ shell: bash
+ run: |
+ if [[ "${{ job.status }}" == "success" ]]; then
+ support="✓"
+ else
+ support=""
+ fi
+
+ mkdir -p compat
+ prefix="${{ matrix.os }},${{ matrix.compiler }},${{ matrix.version }}"
+ echo "$prefix,$support" >> "compat/test_${prefix//,/_}.csv"
+
+ - name: Upload reports
+ if: success() || failure()
+ uses: actions/upload-artifact@v3
+ with:
+ name: compat
+ path: compat/*.csv
+
+ report:
+ name: Make compatibility report
+ if: success() || failure()
+ needs: test
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
+ steps:
+
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: 3.9
+
+ - name: Install packages
+ run: pip install tabulate pandas
+
+ - name: Download reports
+ uses: actions/download-artifact@v3
+ with:
+ name: compat
+ path: .github/compat/new
+
+ - name: Concatenate reports
+ working-directory: .github/compat
+ run: |
+ cols="runner,compiler,version,support"
+ echo "$cols" > long_comp.csv
+ echo "$cols" > long_test.csv
+ cat new/comp*.csv >> long_comp.csv
+ cat new/test*.csv >> long_test.csv
+
+ - name: Make wide CSV and MD tables
+ working-directory: .github/compat
+ id: merge-reports
+ run: |
+ python ../common/wide_compat_reports.py "long_comp.csv" "comp.csv"
+ python ../common/wide_compat_reports.py "long_test.csv" "test.csv"
+
+ # only upload wide CSVs and Markdown tables
+ - name: Upload artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: compat
+ path: |
+ .github/compat/comp.*
+ .github/compat/test.*
+
+      # update DEVELOPER.md if this is not a pull request event and there are any changes
+ - name: Check for changes
+ working-directory: .github/compat
+ if: github.event_name != 'pull_request'
+ id: diff
+ run: |
+ if ! [ -f comp.csv ]; then
+ echo "diff=false" >> $GITHUB_OUTPUT
+ exit 0
+ fi
+
+ diff_comp=$(git diff comp.csv)
+ diff_test=$(git diff test.csv)
+ if [[ ( $diff_comp == "" ) && ( $diff_test == "" ) ]]; then
+ echo "No changes found"
+ echo "diff=false" >> $GITHUB_OUTPUT
+ else
+ echo "comp.csv diff:"
+ echo "$diff_comp"
+ echo "test.csv diff:"
+ echo "$diff_test"
+ echo "diff=true" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Update DEVELOPER.md
+ if: ${{ steps.diff.outputs.diff == 'true' && github.event_name != 'pull_request' }}
+ run: |
+ python .github/common/update_compat_tables.py "compile" ".github/compat/comp.md" "DEVELOPER.md"
+ python .github/common/update_compat_tables.py "test" ".github/compat/test.md" "DEVELOPER.md"
+
+ - name: Print DEVELOPER.md diff
+ if: ${{ steps.diff.outputs.diff == 'true' && github.event_name != 'pull_request' }}
+ run: git diff DEVELOPER.md
+
+ - name: Create pull request
+ if: ${{ steps.diff.outputs.diff == 'true' && github.event_name != 'pull_request' }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ # one at a time
+ head="compat"
+ id=$(gh pr list -H $head -s open --json id -q ".[0].id")
+ [[ -n "${id// /}" ]] && (echo "PR already open"; exit 0) || (echo "opening PR")
+
+ # setup bot user
+ git config user.name "github-actions[bot]"
+ git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+
+ # create new branch
+ git switch -c "$head"
+
+ # commit and push
+ git add DEVELOPER.md .github/compat/comp.csv .github/compat/test.csv
+ git commit -m "Update compatibility tables"
+ git push -u origin "$head"
+
+ # open PR
+ cat <(echo '### Compile') <(echo) .github/compat/comp.md <(echo) <(echo '### Test') <(echo) .github/compat/test.md > compat.md
+ gh pr create -B "${{ github.event.repository.default_branch }}" -H "$head" --title "Update compile/test compatibility tables" --body-file compat.md
\ No newline at end of file
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 1da492fc9c5..35f630e6bc5 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -1,18 +1,16 @@
name: MODFLOW 6 documentation
on:
push:
- branches:
- - master
- - develop
- - ci-diagnose*
paths-ignore:
- '.github/workflows/release.yml'
+ - '.hpc/**'
pull_request:
branches:
- master
- develop
paths-ignore:
- '.github/workflows/release.yml'
+ - '.hpc/**'
jobs:
rtd_build:
name: Build ReadTheDocs
@@ -25,18 +23,18 @@ jobs:
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: modflow6
- name: Checkout modflow6-examples
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-examples
path: modflow6-examples
- name: Checkout usgslatex
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/usgslatex
path: usgslatex
@@ -69,14 +67,14 @@ jobs:
run: pytest -v build_mfio_tex.py
- name: Setup GNU Fortran ${{ env.GCC_V }}
- uses: awvwgk/setup-fortran@main
+ uses: fortran-lang/setup-fortran@v1
with:
compiler: gcc
version: ${{ env.GCC_V }}
- name: Cache modflow6 examples
id: cache-examples
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: modflow6-examples/examples
key: modflow6-examples-${{ hashFiles('modflow6-examples/scripts/**') }}
@@ -95,9 +93,9 @@ jobs:
- name: Build example models
if: steps.cache-examples.outputs.cache-hit != 'true'
- working-directory: modflow6-examples/etc
+ working-directory: modflow6-examples/autotest
run: |
- python ci_build_files.py
+ pytest -v -n auto test_scripts.py --init
ls -lh ../examples/
- name: Run benchmarks
@@ -105,21 +103,35 @@ jobs:
run: python benchmark.py
env:
GITHUB_TOKEN: ${{ github.token }}
-
- - name: Run sphinx
- working-directory: modflow6/.build_rtd_docs
- run: make html
-
+
- name: Show benchmarks
working-directory: modflow6/distribution
run: cat run-time-comparison.md
-
+
- name: Upload benchmarks
uses: actions/upload-artifact@v3
with:
name: run-time-comparison
path: modflow6/distribution/run-time-comparison.md
+ - name: Collect deprecations
+ working-directory: modflow6/doc/mf6io/mf6ivar
+ run: python deprecations.py
+
+ - name: Show deprecations
+ working-directory: modflow6/doc/mf6io/mf6ivar/md
+ run: cat deprecations.md
+
+ - name: Upload deprecations
+ uses: actions/upload-artifact@v3
+ with:
+ name: deprecations
+ path: modflow6/doc/mf6io/mf6ivar/md/deprecations.md
+
+ - name: Run sphinx
+ working-directory: modflow6/.build_rtd_docs
+ run: make html
+
- name: Upload results
uses: actions/upload-artifact@v3
with:
@@ -139,7 +151,7 @@ jobs:
if: github.repository_owner == 'MODFLOW-USGS' && github.event_name == 'push'
steps:
- name: Checkout repo
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Trigger RTDs build on push to repo branches
uses: dfm/rtds-action@v1
@@ -160,7 +172,7 @@ jobs:
shell: bash -l {0}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Print branch name
run: echo ${{env.branch-name}}
@@ -208,7 +220,7 @@ jobs:
working-directory: ${{env.working-directory}}
- name: upload pages artifact
- uses: actions/upload-pages-artifact@v1
+ uses: actions/upload-pages-artifact@v2
with:
path: ${{env.working-directory}}/html
@@ -225,4 +237,4 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
- uses: actions/deploy-pages@v2
+ uses: actions/deploy-pages@v3
diff --git a/.github/workflows/large.yml b/.github/workflows/large.yml
index 291dcc16673..e9cf36c98c5 100644
--- a/.github/workflows/large.yml
+++ b/.github/workflows/large.yml
@@ -3,38 +3,53 @@ on:
schedule:
- cron: '0 6 * * *' # run at 6 AM UTC every day
jobs:
+ # caching only necessary on Windows
cache_ifort:
name: Cache Intel OneAPI compilers
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
- os: [ ubuntu-22.04, macos-12, windows-2022 ]
+ include:
+ # ifx
+ - {os: windows-2022, compiler: intel, version: 2022.2}
+ # ifort
+ - {os: windows-2022, compiler: intel-classic, version: "2021.10"}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.9}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.8}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.7}
+ - {os: windows-2022, compiler: intel-classic, version: 2021.6}
steps:
- - name: Setup Intel Fortran
- uses: modflowpy/install-intelfortran-action@v1
+ - name: Setup ${{ matrix.compiler }} ${{ matrix.version }}
+ uses: fortran-lang/setup-fortran@v1
+ with:
+ compiler: ${{ matrix.compiler }}
+ version: ${{ matrix.version }}
test:
name: Test
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
- fc: [ ifort, gfortran ]
- repo: [ examples, largetestmodels ]
+ include:
+ - {compiler: gcc, version: 13, repo: examples}
+ - {compiler: gcc, version: 13, repo: largetestmodels}
+ - {compiler: intel, version: 2022.2.1, repo: examples}
+ - {compiler: intel, version: 2022.2.1, repo: largetestmodels}
+ - {compiler: intel-classic, version: 2021.6, repo: examples}
+ - {compiler: intel-classic, version: 2021.6, repo: largetestmodels}
defaults:
run:
shell: bash -l {0}
- env:
- GCC_V: 12
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: modflow6
- name: Checkout modflow6-${{ matrix.repo }}
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-${{ matrix.repo }}
path: modflow6-${{ matrix.repo }}
@@ -46,20 +61,15 @@ jobs:
cache-downloads: true
cache-environment: true
- - name: Setup gfortran ${{ env.GCC_V }}
- if: matrix.FC == 'gfortran'
- uses: awvwgk/setup-fortran@main
+ - name: Setup compilers (${{ matrix.compiler }} ${{ matrix.version }})
+ uses: fortran-lang/setup-fortran@v1
with:
- compiler: gcc
- version: ${{ env.GCC_V }}
-
- - name: Setup ifort
- if: matrix.fc == 'ifort'
- uses: modflowpy/install-intelfortran-action@v1
+ compiler: ${{ matrix.compiler }}
+ version: ${{ matrix.version }}
- name: Cache modflow6 examples
id: cache-examples
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: modflow6-examples/examples
key: modflow6-examples-${{ hashFiles('modflow6-examples/scripts/**') }}
@@ -72,9 +82,9 @@ jobs:
- name: Build example models
if: matrix.repo == 'examples' && steps.cache-examples.outputs.cache-hit != 'true'
- working-directory: modflow6-examples/etc
+ working-directory: modflow6-examples/autotest
run: |
- python ci_build_files.py
+ pytest -v -n auto test_scripts.py --init
ls -lh ../examples/
- name: Add Micromamba Scripts dir to path (Windows)
@@ -103,4 +113,4 @@ jobs:
- name: Run tests
working-directory: modflow6/autotest
- run: pytest -v -n auto --durations 0 test_z03_${{ matrix.repo }}.py
\ No newline at end of file
+ run: pytest -v -n auto --durations 0 test_${{ matrix.repo }}.py
\ No newline at end of file
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1973766a474..527952f547c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -13,6 +13,16 @@ on:
description: 'Branch to release from.'
required: true
type: string
+ compiler_toolchain:
+ description: 'Compiler toolchain to use. For supported options see https://github.com/MODFLOW-USGS/modflow6/blob/develop/DEVELOPER.md#compiler-compatibility.'
+ required: false
+ type: string
+ default: 'intel-classic'
+ compiler_version:
+ description: 'Compiler version to use. For supported options see https://github.com/MODFLOW-USGS/modflow6/blob/develop/DEVELOPER.md#compiler-compatibility.'
+ required: false
+ type: string
+ default: '2021.7'
developmode:
description: 'Build binaries in develop mode. If false, IDEVELOPMODE is set to 0.'
required: false
@@ -68,7 +78,7 @@ jobs:
distname: ${{ steps.set_version.outputs.distname }}
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: ${{ github.repository_owner }}/modflow6
path: modflow6
@@ -84,8 +94,11 @@ jobs:
bash
powershell
- - name: Setup Intel Fortran
- uses: modflowpy/install-intelfortran-action@v1
+ - name: Setup ${{ inputs.compiler_toolchain }} ${{ inputs.compiler_version }}
+ uses: fortran-lang/setup-fortran@v1
+ with:
+ compiler: ${{ inputs.compiler_toolchain }}
+ version: ${{ inputs.compiler_version }}
- name: Set version number
id: set_version
@@ -139,14 +152,14 @@ jobs:
# only run steps below if inputs.run_tests is true
- name: Checkout modflow6-testmodels
if: inputs.run_tests == true
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-testmodels
path: modflow6-testmodels
- name: Checkout modflow6-examples
if: inputs.run_tests == true
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-examples
path: modflow6-examples
@@ -192,7 +205,7 @@ jobs:
# compiling binaries, building documentation
- name: Checkout usgslatex
if: ${{ runner.os == 'Linux' && inputs.run_tests == true }}
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/usgslatex
path: usgslatex
@@ -212,22 +225,6 @@ jobs:
working-directory: usgslatex/usgsLaTeX
run: sudo ./install.sh --all-users
- - name: Install dependencies for ex-gwf-twri example model
- if: ${{ runner.os == 'Linux' && inputs.run_tests == true }}
- working-directory: modflow6-examples/etc
- run: |
- # install extra Python packages
- pip install -r requirements.pip.txt
-
- # the example model needs executables to be on the path
- echo "${{ github.workspace }}/modflow6/bin" >> $GITHUB_PATH
- echo "${{ github.workspace }}/modflow6/bin/downloaded" >> $GITHUB_PATH
-
- - name: Build ex-gwf-twri example model
- if: ${{ runner.os == 'Linux' && inputs.run_tests == true }}
- working-directory: modflow6-examples/scripts
- run: python ex-gwf-twri.py
-
- name: Test distribution scripts
if: ${{ inputs.run_tests == true }}
working-directory: modflow6/distribution
@@ -245,20 +242,20 @@ jobs:
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: ${{ github.repository_owner }}/modflow6
path: modflow6
ref: ${{ inputs.branch }}
- name: Checkout modflow6-examples
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-examples
path: modflow6-examples
- name: Checkout usgslatex
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/usgslatex
path: usgslatex
@@ -283,8 +280,11 @@ jobs:
cache-downloads: true
cache-environment: true
- - name: Setup Intel Fortran
- uses: modflowpy/install-intelfortran-action@v1
+ - name: Setup ${{ inputs.compiler_toolchain }} ${{ inputs.compiler_version }}
+ uses: fortran-lang/setup-fortran@v1
+ with:
+ compiler: ${{ inputs.compiler_toolchain }}
+ version: ${{ inputs.compiler_version }}
- name: Update version
id: update_version
@@ -300,51 +300,44 @@ jobs:
fi
eval "$cmd"
+ - name: Update FloPy classes
+ working-directory: modflow6/autotest
+ run: python update_flopy.py
+
- name: Download pre-built binaries
uses: actions/download-artifact@v3
with:
name: bin-${{ runner.os }}
path: bin
+
+ # execute permissions may not have survived artifact upload/download
+ - name: Set executable permissions
+ working-directory: modflow6-examples/etc
+ run: |
+ chmod +x "${{ github.workspace }}/bin/mf6"
+ chmod +x "${{ github.workspace }}/bin/mf5to6"
+ chmod +x "${{ github.workspace }}/bin/zbud6"
- name: Install dependencies for building models
+ if: inputs.full == true
working-directory: modflow6-examples/etc
env:
GITHUB_TOKEN: ${{ github.token }}
run: |
- # install extra Python packages
pip install -r requirements.pip.txt
-
- # the example model needs executables to be on the path
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
-
- # execute permissions may not have survived artifact upload/download
- chmod +x "${{ github.workspace }}/bin/mf6"
- chmod +x "${{ github.workspace }}/bin/mf5to6"
- chmod +x "${{ github.workspace }}/bin/zbud6"
-
- # the example model also needs mf2005
get-modflow "${{ github.workspace }}/bin" --subset mf2005,triangle,gridgen
-
- - name: Update FloPy
- working-directory: modflow6/autotest
- run: python update_flopy.py
-
- - name: Build ex-gwf-twri example model
- if: inputs.full != true
- working-directory: modflow6-examples/scripts
- run: python ex-gwf-twri.py
- name: Build example models
if: inputs.full == true
- working-directory: modflow6-examples/etc
- run: python ci_build_files.py
+ working-directory: modflow6-examples/autotest
+ run: pytest -v -n auto test_scripts.py --init
- - name: Create full docs folder structure
+ - name: Create folder structure
if: inputs.full == true
run: |
+ # Create empty folder structure for the /docs subdirectory
distname=${{ needs.build.outputs.distname }}
-
- # Create a skeleton of the distribution's folder structure to include in the docs
mkdir -p "$distname/doc"
mkdir "$distname/make"
mkdir "$distname/msvs"
@@ -353,19 +346,34 @@ jobs:
cp modflow6/meson.build "$distname/meson.build"
cp -r modflow6-examples/examples "$distname"
cp -r modflow6/src "$distname"
- cp -r modflow6/utils "$distname"
+ cp -r modflow6/utils/mf5to6 "$distname/utils/mf5to6"
+ cp -r modflow6/utils/zonebudget "$distname/utils/zonebudget"
# create LaTeX file describing the folder structure
cd modflow6/doc/ReleaseNotes
python mk_folder_struct.py -dp "${{ github.workspace }}/$distname"
+ - name: Collect deprecations
+ working-directory: modflow6/doc/mf6io/mf6ivar
+ run: |
+ python deprecations.py
+ cat md/deprecations.md
+
+ - name: Upload deprecations
+ uses: actions/upload-artifact@v3
+ with:
+ name: deprecations
+ path: modflow6/doc/mf6io/mf6ivar/md/deprecations.md
+
- name: Build documentation
env:
- # need a GITHUB_TOKEN to download example doc PDF asset from modflow6-examples repo
+ # this step is lazy about building the mf6 examples PDF document, first
+ # trying to download a prebuilt PDF from MODFLOW-USGS/modflow6-examples,
+ # so it needs a GITHUB_TOKEN.
GITHUB_TOKEN: ${{ github.token }}
run: |
mkdir -p "${{ needs.build.outputs.distname }}/doc"
- cmd="python modflow6/distribution/build_docs.py -b bin -o doc -e modflow6-examples"
+ cmd="python modflow6/distribution/build_docs.py -b bin -o doc"
if [[ "${{ inputs.full }}" == "true" ]]; then
cmd="$cmd --full"
fi
@@ -399,14 +407,14 @@ jobs:
shell: bash -l {0}
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: ${{ github.repository_owner }}/modflow6
path: modflow6
ref: ${{ inputs.branch }}
- name: Checkout modflow6-examples
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: MODFLOW-USGS/modflow6-examples
path: modflow6-examples
@@ -421,8 +429,11 @@ jobs:
bash
powershell
- - name: Setup Intel Fortran
- uses: modflowpy/install-intelfortran-action@v1
+ - name: Setup ${{ inputs.compiler_toolchain }} ${{ inputs.compiler_version }}
+ uses: fortran-lang/setup-fortran@v1
+ with:
+ compiler: ${{ inputs.compiler_toolchain }}
+ version: ${{ inputs.compiler_version }}
- name: Update version
id: update_version
@@ -463,13 +474,9 @@ jobs:
env:
GITHUB_TOKEN: ${{ github.token }}
run: |
- # install extra Python packages
pip install -r modflow6-examples/etc/requirements.pip.txt
-
- # example models need executables to be on the path
distname="${{ needs.build.outputs.distname }}_${{ matrix.ostag }}"
echo "$distname/bin" >> $GITHUB_PATH
-
# execute permissions may not have survived artifact upload/download
chmod +x "$distname/bin/mf6"
chmod +x "$distname/bin/mf5to6"
@@ -487,8 +494,8 @@ jobs:
- name: Build example models
if: inputs.full == true
- working-directory: modflow6-examples/etc
- run: python ci_build_files.py
+ working-directory: modflow6-examples/autotest
+ run: pytest -v -n auto test_scripts.py --init
- name: Build distribution
env:
@@ -523,10 +530,10 @@ jobs:
$distname/utils \
$distname/code.json \
$distname/meson.build \
+ $distname/meson.options \
-x '*.DS_Store' \
-x '*libmf6.lib' \
-x '*idmloader*' \
- -x '*pymake*' \
-x '*obj_temp*' \
-x '*mod_temp*'
else
@@ -538,7 +545,6 @@ jobs:
-x '*.DS_Store' \
-x '*libmf6.lib' \
-x '*idmloader*' \
- -x '*pymake*' \
-x '*obj_temp*' \
-x '*mod_temp*'
fi
@@ -559,9 +565,9 @@ jobs:
$distname/utils \
$distname/code.json \
$distname/meson.build \
+ $distname/meson.options \
-xr!libmf6.lib \
-xr!idmloader \
- -xr!pymake \
-xr!obj_temp \
-xr!mod_temp
else
@@ -572,26 +578,10 @@ jobs:
$distname/code.json \
-xr!libmf6.lib \
-xr!idmloader \
- -xr!pymake \
-xr!obj_temp \
-xr!mod_temp
fi
- # validate only after zipping distribution to avoid accidentally changing any files
- - name: Validate distribution
- run: |
- cmd="pytest -v -s modflow6/distribution/check_dist.py --path ${{ needs.build.outputs.distname }}_${{ matrix.ostag }}"
- if [[ "${{ inputs.approve }}" == "true" ]]; then
- cmd="$cmd --approved"
- fi
- if [[ "${{ inputs.developmode }}" == "false" ]]; then
- cmd="$cmd --releasemode"
- fi
- if [[ "${{ inputs.full }}" == "true" ]]; then
- cmd="$cmd --full"
- fi
- eval "$cmd"
-
- name: Upload distribution
uses: actions/upload-artifact@v3
with:
@@ -604,3 +594,28 @@ jobs:
with:
name: release_notes
path: "${{ needs.build.outputs.distname }}_${{ matrix.ostag }}/doc/release.pdf"
+
+ - name: Check distribution
+ run: |
+ # unzip and validate the archive
+ distname="${{ needs.build.outputs.distname }}_${{ matrix.ostag }}"
+ distfile="$distname.zip"
+ checkdir="check"
+ mkdir $checkdir
+ if [[ "$RUNNER_OS" == "Windows" ]]; then
+ 7z x $distfile -o$checkdir
+ else
+ unzip $distfile -d $checkdir
+ fi
+
+ cmd="pytest -v -s modflow6/distribution/check_dist.py --path $checkdir/$distname"
+ if [[ "${{ inputs.approve }}" == "true" ]]; then
+ cmd="$cmd --approved"
+ fi
+ if [[ "${{ inputs.developmode }}" == "false" ]]; then
+ cmd="$cmd --releasemode"
+ fi
+ if [[ "${{ inputs.full }}" == "true" ]]; then
+ cmd="$cmd --full"
+ fi
+ eval "$cmd"
\ No newline at end of file
diff --git a/.github/workflows/release_dispatch.yml b/.github/workflows/release_dispatch.yml
index 567bab3eae3..3037c98698e 100644
--- a/.github/workflows/release_dispatch.yml
+++ b/.github/workflows/release_dispatch.yml
@@ -24,6 +24,16 @@ on:
description: 'Branch to release from.'
required: true
type: string
+ compiler_toolchain:
+ description: 'Compiler toolchain to use. For supported options see https://github.com/MODFLOW-USGS/modflow6/blob/develop/DEVELOPER.md#compiler-compatibility.'
+ required: false
+ type: string
+ default: 'intel-classic'
+ compiler_version:
+ description: 'Compiler version to use. For supported options see https://github.com/MODFLOW-USGS/modflow6/blob/develop/DEVELOPER.md#compiler-compatibility.'
+ required: false
+ type: string
+ default: '2021.7'
commit_version:
description: 'Commit version numbers back to the develop branch. Not considered if reset is false.'
required: false
@@ -36,7 +46,7 @@ on:
default: false
run_tests:
description: 'Run tests after building binaries.'
- required: true
+ required: false
type: boolean
default: true
version:
@@ -53,6 +63,8 @@ jobs:
shell: bash -l {0}
outputs:
branch: ${{ steps.set_branch.outputs.branch }}
+ compiler_toolchain: ${{ steps.set_compiler.outputs.compiler_toolchain }}
+ compiler_version: ${{ steps.set_compiler.outputs.compiler_version }}
version: ${{ steps.set_version.outputs.version }}
steps:
- name: Set branch
@@ -61,8 +73,8 @@ jobs:
# if branch was provided explicitly via workflow_dispatch, use it
if [[ ("${{ github.event_name }}" == "workflow_dispatch") && (-n "${{ inputs.branch }}") ]]; then
branch="${{ inputs.branch }}"
- # prevent releases from develop or master
- if [[ ("$branch" == "develop") || ("$branch" == "master") ]]; then
+ # prevent releases from master
+ if [[ "$branch" == "master" ]]; then
echo "error: releases may not be triggered from branch $branch"
exit 1
fi
@@ -77,6 +89,26 @@ jobs:
exit 1
fi
echo "branch=$branch" >> $GITHUB_OUTPUT
+ - name: Set compiler
+ id: set_compiler
+ run: |
+ # if compiler toolchain was provided explicitly via workflow_dispatch, use it
+ if [[ ("${{ github.event_name }}" == "workflow_dispatch") && (-n "${{ inputs.compiler_toolchain }}") ]]; then
+ compiler_toolchain="${{ inputs.compiler_toolchain }}"
+ compiler_version="${{ inputs.compiler_version }}"
+ echo "using compiler toolchain $compiler_toolchain version $compiler_version from workflow_dispatch"
+ elif [[ ("${{ github.event_name }}" == "push") && ("${{ github.ref_name }}" != "master") ]]; then
+ # if release was triggered by pushing a release branch, use the default toolchain and version
+ compiler_toolchain="intel-classic"
+ compiler_version="2021.7"
+ echo "using default compiler toolchain $compiler_toolchain version $compiler_version"
+ else
+ # otherwise exit with an error
+ echo "error: this workflow should not have triggered for event ${{ github.event_name }} on branch ${{ github.ref_name }}"
+ exit 1
+ fi
+ echo "compiler_toolchain=$compiler_toolchain" >> $GITHUB_OUTPUT
+ echo "compiler_version=$compiler_version" >> $GITHUB_OUTPUT
- name: Set version
id: set_version
run: |
@@ -103,6 +135,8 @@ jobs:
# If the workflow is manually triggered, the maintainer must manually set approve=true to approve a release.
# If triggered by pushing a release branch, the release is approved if the branch name doesn't contain "rc".
approve: ${{ (github.event_name == 'workflow_dispatch' && inputs.approve == 'true') || (github.event_name != 'workflow_dispatch' && !contains(github.ref_name, 'rc')) }}
+ compiler_toolchain: ${{ needs.set_options.outputs.compiler_toolchain }}
+ compiler_version: ${{ needs.set_options.outputs.compiler_version }}
branch: ${{ needs.set_options.outputs.branch }}
developmode: false
full: true
@@ -110,7 +144,7 @@ jobs:
version: ${{ needs.set_options.outputs.version }}
pr:
name: Draft release PR
- if: ${{ github.event_name == 'push' && github.ref_name != 'master' && (github.event_name == 'workflow_dispatch' && inputs.approve == 'true') || (github.event_name != 'workflow_dispatch' && !contains(github.ref_name, 'rc')) }}
+ if: ${{ github.ref_name != 'master' && ((github.event_name == 'workflow_dispatch' && inputs.approve == 'true') || (github.event_name != 'workflow_dispatch' && !contains(github.ref_name, 'rc'))) }}
needs:
- set_options
- make_dist
@@ -123,7 +157,7 @@ jobs:
shell: bash -l {0}
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: ${{ github.repository_owner }}/modflow6
ref: ${{ github.ref }}
@@ -185,7 +219,7 @@ jobs:
shell: bash -l {0}
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: ${{ github.repository_owner }}/modflow6
path: modflow6
@@ -198,7 +232,7 @@ jobs:
cache-environment: true
- name: Download artifacts
- uses: dawidd6/action-download-artifact@v2
+ uses: dawidd6/action-download-artifact@v3
- name: Draft release
working-directory: modflow6
@@ -229,7 +263,7 @@ jobs:
steps:
- name: Checkout modflow6
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: ${{ github.repository_owner }}/modflow6
path: modflow6
diff --git a/.gitignore b/.gitignore
index 56b28c72ec8..959f560e70f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -133,3 +133,8 @@ unittests/
**/__pycache__
**/.benchmarks
+
+# compiler compatibility markdown tables
+.github/compat/*.md
+
+**.DS_Store
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 00000000000..c518b77c314
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,66 @@
+# Deploy MODFLOW 6 to HPC systems.
+#
+# Expects shared environment variables...
+#
+# - GIT_REMOTE, name of the mf6 remote to pull from
+# - GIT_BRANCH, name of the mf6 branch to pull from
+# - MODULE_SCRIPT, module update script path (relative to mf6 proj root)
+# - SSH_KNOWN_HOSTS, content of ~/.ssh/known_hosts
+# - SSH_USERNAME, SSH username to login with
+# - SSH_PRIVATE_KEY, SSH private key for user
+#
+# ...and environment-specific variables:
+#
+# - DENALI_HOSTNAME, SSH hostname of cluster
+# - DENALI_SLURM_ACCOUNT, slurm account for jobs
+# - DENALI_SLURM_RESERVATION, slurm reservation for jobs
+# - DENALI_MODULES_PATH, path to root of module system
+# - DENALI_MF6_PREV_VERSION, version of modulefile to use as template
+# - DENALI_MF6_PROJ_ROOT, path to modflow6 project root
+# - DENALI_BUILD_SCRIPT, relpath of script to build mf6
+#
+# ...and likewise for HOVENWEEP_* or other systems
+
+image: ubuntu:20.04
+workflow:
+ rules:
+ - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == 'push'
+before_script:
+ # install ssh agent
+ - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client git -y )'
+ # configure ssh agent
+ - eval $(ssh-agent -s)
+ # set private key permissions
+ - chmod 400 "$SSH_PRIVATE_KEY"
+ # add keys to the agent store
+ - ssh-add "$SSH_PRIVATE_KEY"
+ # configure known hosts
+ - mkdir -p ~/.ssh
+ - cp "$SSH_KNOWN_HOSTS" ~/.ssh/known_hosts
+ - chmod 644 ~/.ssh/known_hosts
+deploy_denali:
+ environment: denali
+ script: "$CI_PROJECT_DIR/.hpc/deploy.sh"
+ stage: deploy
+ variables:
+ SSH_USERNAME: $SSH_USERNAME
+ SSH_HOSTNAME: $DENALI_HOSTNAME
+ SLURM_ACCOUNT: $DENALI_SLURM_ACCOUNT
+ SLURM_RESERVATION: $DENALI_SLURM_RESERVATION
+ MODULES_PATH: $DENALI_MODULES_PATH
+ MF6_PROJ_ROOT: $DENALI_MF6_PROJ_ROOT
+ MF6_PREV_VERSION: $DENALI_MF6_PREV_VERSION
+ BUILD_SCRIPT: $DENALI_BUILD_SCRIPT
+deploy_hovenweep:
+ environment: hovenweep
+ script: "$CI_PROJECT_DIR/.hpc/deploy.sh"
+ stage: deploy
+ variables:
+ SSH_USERNAME: $SSH_USERNAME
+ SSH_HOSTNAME: $HOVENWEEP_HOSTNAME
+ SLURM_ACCOUNT: $HOVENWEEP_SLURM_ACCOUNT
+ SLURM_RESERVATION: $HOVENWEEP_SLURM_RESERVATION
+ MODULES_PATH: $HOVENWEEP_MODULES_PATH
+ MF6_PROJ_ROOT: $HOVENWEEP_MF6_PROJ_ROOT
+ MF6_PREV_VERSION: $HOVENWEEP_MF6_PREV_VERSION
+ BUILD_SCRIPT: $HOVENWEEP_BUILD_SCRIPT
\ No newline at end of file
diff --git a/.hpc/BUILD.md b/.hpc/BUILD.md
index dd8257e2b6d..9f9b0ac8374 100644
--- a/.hpc/BUILD.md
+++ b/.hpc/BUILD.md
@@ -1,20 +1,30 @@
+
# Building MODFLOW 6 on HPC systems
-## SLURM job
+_On Denali_
```
sbatch --reservation=dev cray-meson-build.slurm.batch
```
-## Interactive job
+_On Hovenweep_
+
+```
+sbatch cray-hovenweep-meson-build.slurm.batch
+```
+
+## Create a module file for a new version of MODFLOW 6
+
+On _Denali_ make a copy of an existing module file using
+```
+rsync /home/software/denali/contrib/impd/modulefiles/modflow/6.5.0.dev0 /home/software/denali/contrib/impd/modulefiles/modflow/6.x.x
+```
+On _Hovenweep_ make a copy of an existing module file using
```
-module switch PrgEnv-${PE_ENV,,} PrgEnv-intel
-module load cray-petsc meson ninja
-export PKG_CONFIG_PATH=/opt/cray/pe/mpt/7.7.19/gni/mpich-intel/16.0/lib/pkgconfig:/opt/cray/pe/petsc/3.14.5.0/real/INTEL/19.1/x86_skylake/lib/pkgconfig:$PKG_CONFIG_PATH
+rsync /home/software/hovenweep/contrib/impd/modulefiles/modflow/6.5.0.dev0 /home/software/hovenweep/contrib/impd/modulefiles/modflow/6.x.x
+```
+
+Edit `product_version` in the new module file from `6.5.0.dev0` to `6.x.x` on both systems.
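+
+For example, the version string can be updated in place with `sed` (a sketch; adjust the path for the system and target version at hand):
+
+```
+sed -i -e "s/6.5.0.dev0/6.x.x/g" /home/software/denali/contrib/impd/modulefiles/modflow/6.x.x
+```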
-srun --reservation=dev --account=impd --pty bash
-meson setup builddir -Ddebug=false --prefix=$(pwd) --libdir=bin -Dcray=true -Ddebug=false --wipe
-meson install -C builddir
-```
\ No newline at end of file
diff --git a/.hpc/cray-hovenweep-meson-build.slurm.batch b/.hpc/cray-hovenweep-meson-build.slurm.batch
new file mode 100644
index 00000000000..a1020f54cf0
--- /dev/null
+++ b/.hpc/cray-hovenweep-meson-build.slurm.batch
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+#SBATCH --job-name=hovenweep-build
+#SBATCH --nodes=1
+#SBATCH --ntasks=2
+#SBATCH --account=impd
+#SBATCH --time=00:10:00
+#SBATCH --output=slurm-%j.out
+#SBATCH --chdir=../
+
+set -euxo pipefail
+
+# load appropriate modules
+module switch PrgEnv-${PE_ENV,,} PrgEnv-intel
+module load petsc/3.15.5
+export PKG_CONFIG_PATH=$CRAY_MPICH_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
+
+# list loaded modules
+module list
+
+# define the project root (expected to be cwd)
+MODFLOW6ROOT=$(pwd)
+
+# define the version
+VERSION=$(cat "$MODFLOW6ROOT/version.txt")
+echo "MODFLOW 6 version: $VERSION"
+
+# define paths relative to the root directory
+BUILDDIR=$MODFLOW6ROOT/$PE_ENV-$VERSION
+BINDIR=$BUILDDIR/src
+TESTDIR=$MODFLOW6ROOT/.mf6minsim
+
+# define the installation location
+PREFIX=/home/software/hovenweep/contrib/impd/apps/modflow/$VERSION/$PE_ENV/2023.2.0
+
+# build MODFLOW 6
+CC=cc CXX=CC F77=ftn F90=ftn FC=ftn meson setup $BUILDDIR --prefix=$PREFIX --bindir=bin --libdir=lib -Dcray=true -Ddebug=false --wipe
+meson compile -C $BUILDDIR
+
+# install MODFLOW 6
+meson install -C $BUILDDIR
+
+# test MODFLOW 6 build
+cd $TESTDIR
+
+# serial run
+$BINDIR/mf6
+
+# parallel run
+srun $BINDIR/mf6 -p
\ No newline at end of file
diff --git a/.hpc/cray-meson-build.slurm.batch b/.hpc/cray-meson-build.slurm.batch
index c49de516198..1f62d43f6f3 100644
--- a/.hpc/cray-meson-build.slurm.batch
+++ b/.hpc/cray-meson-build.slurm.batch
@@ -1,11 +1,14 @@
#!/bin/bash
-#SBATCH --job-name=meson-MODFLOW-build
+#SBATCH --job-name=denali-build
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --account=impd
#SBATCH --time=00:10:00
#SBATCH --output=slurm-%j.out
+#SBATCH --chdir=../
+
+set -euxo pipefail
# load appropriate modules
module switch PrgEnv-${PE_ENV,,} PrgEnv-intel
@@ -15,17 +18,27 @@ export PKG_CONFIG_PATH=/opt/cray/pe/mpt/7.7.19/gni/mpich-intel/16.0/lib/pkgconfi
# list loaded modules
module list
-# move to root directory
-cd ..
+# define the project root (expected to be cwd)
+MODFLOW6ROOT=$(pwd)
+
+# define the version
+VERSION=$(cat "$MODFLOW6ROOT/version.txt")
+echo "MODFLOW 6 version: $VERSION"
# define paths relative to the root directory
-MODFLOW6ROOT=$(pwd)
-BINDIR=$MODFLOW6ROOT/bin
+BUILDDIR=$MODFLOW6ROOT/$PE_ENV-$VERSION
+BINDIR=$BUILDDIR/src
TESTDIR=$MODFLOW6ROOT/.mf6minsim
+# define the installation location
+PREFIX=/home/software/denali/contrib/impd/apps/modflow/$VERSION/$PE_ENV/19.1.0.166
+
# build MODFLOW 6
-CC=cc CXX=CC F77=ftn F90=ftn FC=ftn meson setup builddir --prefix=$(pwd) --libdir=bin -Dcray=true -Ddebug=false --wipe
-meson install -C builddir
+CC=cc CXX=CC F77=ftn F90=ftn FC=ftn meson setup $BUILDDIR --prefix=$PREFIX --bindir=bin --libdir=lib -Dcray=true -Ddebug=false
+meson compile -C $BUILDDIR
+
+# install MODFLOW 6
+meson install -C $BUILDDIR
# test MODFLOW 6 build
cd $TESTDIR
@@ -34,4 +47,4 @@ cd $TESTDIR
$BINDIR/mf6
# parallel run
-srun $BINDIR/mf6 -p
+srun $BINDIR/mf6 -p
\ No newline at end of file
diff --git a/.hpc/deploy.sh b/.hpc/deploy.sh
new file mode 100755
index 00000000000..b95b6c7f381
--- /dev/null
+++ b/.hpc/deploy.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+# fetch and checkout latest
+ssh -l "$SSH_USERNAME" "$SSH_HOSTNAME" "cd $MF6_PROJ_ROOT && git fetch $GIT_REMOTE && git checkout $GIT_REMOTE/$GIT_BRANCH"
+echo "Updated repository $MF6_PROJ_ROOT"
+# submit a job to build mf6
+jobid=$(ssh -l "$SSH_USERNAME" "$SSH_HOSTNAME" "sbatch --account=$SLURM_ACCOUNT --reservation=$SLURM_RESERVATION --parsable -D $MF6_PROJ_ROOT $MF6_PROJ_ROOT/$BUILD_SCRIPT" | tail -n 1)
+echo "Submitted build job $jobid"
+# submit a job to update the mf6 module
+jobid=$(ssh -l "$SSH_USERNAME" "$SSH_HOSTNAME" "sbatch --export=ALL,MF6_PREV_VERSION=$MF6_PREV_VERSION,MF6_PROJ_ROOT=$MF6_PROJ_ROOT,MODULES_PATH=$MODULES_PATH --account=$SLURM_ACCOUNT --reservation=$SLURM_RESERVATION --parsable -D $MF6_PROJ_ROOT -d afterok:$jobid $MF6_PROJ_ROOT/$MODULE_SCRIPT" | tail -n 1)
+echo "Submitted module update job $jobid"
\ No newline at end of file
diff --git a/.hpc/update-module.slurm.batch b/.hpc/update-module.slurm.batch
new file mode 100644
index 00000000000..00135103525
--- /dev/null
+++ b/.hpc/update-module.slurm.batch
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+#SBATCH --job-name=update-mf6-module
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --account=impd
+#SBATCH --time=00:05:00
+#SBATCH --output=slurm-%j.out
+#SBATCH --chdir=../
+
+set -euxo pipefail
+
+# this script expects cwd to be mf6 project root, with env vars...
+# - MF6_PREV_VERSION, mf6 modulefile version to use as a template
+# - MODULES_PATH, the base path of the module system
+MF6_PROJ_ROOT=$(pwd)
+MF6_BINDIR="$MF6_PROJ_ROOT/bin"
+MF6_LOCAL_VERSION=$(cat "$MF6_PROJ_ROOT/version.txt")
+
+# ...and assumes the module system is laid out as follows
+MF6_MODULEFILE_PATH="$MODULES_PATH/modulefiles/modflow/$MF6_LOCAL_VERSION"
+MF6_MODULES_PATH="$MODULES_PATH/apps/modflow"
+MF6_MODULE_PATH="$MF6_MODULES_PATH/$MF6_LOCAL_VERSION"
+
+# create mf6 modulefile if needed (the build script
+# will have already created the module directory)
+if [ ! -f "$MF6_MODULEFILE_PATH" ]; then
+ rsync "$MODULES_PATH/modulefiles/modflow/$MF6_PREV_VERSION" "$MF6_MODULEFILE_PATH"
+ sed -i -e "s/$MF6_PREV_VERSION/$MF6_LOCAL_VERSION/g" "$MF6_MODULEFILE_PATH"
+ echo "Created module file: $MF6_MODULEFILE_PATH"
+fi
\ No newline at end of file
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 086932137d8..808702f8077 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -5,6 +5,11 @@
# Required
version: 2
+build:
+ os: "ubuntu-22.04"
+ tools:
+ python: "3.11"
+
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: .doc/conf.py
@@ -18,6 +23,5 @@ formats: all
# Optionally set the version of Python and requirements required to build your docs
python:
- version: 3.8
install:
- requirements: .build_rtd_docs/requirements.rtd.txt
diff --git a/.vscode/build_vscode.py b/.vscode/build_vscode.py
index 88d3e8b4c97..d222cc7c772 100644
--- a/.vscode/build_vscode.py
+++ b/.vscode/build_vscode.py
@@ -24,7 +24,7 @@
if args.buildtype == "release":
setup_flag = ["-Doptimization=2"]
elif args.buildtype == "debug":
- setup_flag = ["-Doptimization=0"]
+ setup_flag = ["-Ddebug=true", "-Doptimization=0"]
if not os.path.isdir(builddir):
command = [
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
index 490baff6416..76f6cb2fe2d 100644
--- a/.vscode/tasks.json
+++ b/.vscode/tasks.json
@@ -26,7 +26,10 @@
"release",
"build",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
@@ -53,7 +56,10 @@
"release",
"rebuild",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
@@ -81,7 +87,10 @@
"release",
"build",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
@@ -108,7 +117,10 @@
"release",
"rebuild",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
@@ -136,7 +148,10 @@
"debug",
"build",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
@@ -163,7 +178,10 @@
"debug",
"rebuild",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
@@ -191,7 +209,10 @@
"debug",
"build",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
@@ -218,7 +239,10 @@
"debug",
"rebuild",
],
- "group": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
"presentation": {
"clear": true
}
diff --git a/CITATION.cff b/CITATION.cff
index b378cfa4c99..1595372ce9e 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -2,8 +2,8 @@ cff-version: 1.2.0
message: If you use this software, please cite the software itself.
type: software
title: MODFLOW 6 Modular Hydrologic Model
-version: 6.4.2
-date-released: '2023-06-28'
+version: 6.4.3
+date-released: '2024-02-07'
doi: 10.5066/F76Q1VQV
abstract: MODFLOW 6 is an object-oriented program and framework developed to provide
a platform for supporting multiple models and multiple types of models within the
@@ -58,6 +58,11 @@ authors:
alias: w-bonelli
affiliation: U.S. Geological Survey
orcid: https://orcid.org/0000-0002-2665-5078
+- family-names: Boyce
+ given-names: Scott E.
+ alias: ScottBoyce
+ affiliation: U.S. Geological Survey
+ orcid: https://orcid.org/0000-0003-0626-9492
- family-names: Banta
given-names: Edward R.
affiliation: U.S. Geological Survey
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 62f6ede8b94..d89a4670001 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,7 +1,7 @@
Code of Conduct
===============
-All contributions to- and interactions surrounding- this project will abide by
+All contributions to — and interactions surrounding — this project will abide by
the [USGS Code of Scientific Conduct][1].
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 96e163a9268..e751d48539d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -141,7 +141,7 @@ To ensure consistency throughout the source code, keep these rules in mind as yo
## Format Rules
Fortran source code format rules are met by running the
-[fprettify formatter](https://github.com/pseewald/fprettify) while specifying the [MODFLOW 6 fprettify configuration](https://github.com/MODFLOW-USGS/modflow6/blob/develop/distribution/.fprettify.yaml). `fprettify` is included in the Conda `environment.yml` and can be run on the command line or integrated into a [VSCode](https://github.com/MODFLOW-USGS/modflow6/blob/develop/.vscode/README.md) or Visual Studio environment.
+[fprettify formatter](https://github.com/pseewald/fprettify) while specifying the [MODFLOW 6 fprettify configuration](.fprettify.yaml). `fprettify` is included in the Conda `environment.yml` and can be run on the command line or integrated into a [VSCode](.vscode/README.md) or Visual Studio environment.
The configuration file reflects the current minimum standard for Fortran source
formatting. The main goal, however, is consistent and readable Fortran source code and as such pay particular attention to consistency within and across files. As the formatting tool may at times shift code in unexpected ways, check for formatting consistency after running.
@@ -234,9 +234,3 @@ The body should include the motivation for the change and contrast this with pre
The footer should contain any information about **Breaking Changes** and is also the place to reference GitHub issues that this commit **Closes**.
**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines. The rest of the commit message is then used for this.
-
-
-[coc]: https://github.com/MODFLOW-USGS/modflow6/blob/develop/CODE_OF_CONDUCT.md
-[dev-doc]: https://github.com/MODFLOW-USGS/modflow6/blob/develop/DEVELOPER.md
-[github]: https://github.com/MODFLOW-USGS/modflow6
-[stackoverflow]: http://stackoverflow.com/questions/tagged/modflow6
diff --git a/DEVELOPER.md b/DEVELOPER.md
index cc53837d8f0..ce6d7a47783 100644
--- a/DEVELOPER.md
+++ b/DEVELOPER.md
@@ -17,6 +17,9 @@ To build and test a parallel version of the program, first read the instructions
- [Windows](#windows)
- [Intel Fortran](#intel-fortran)
- [Windows](#windows-1)
+ - [Compiler compatibility](#compiler-compatibility)
+ - [Compile](#compile)
+ - [Test](#test)
- [Python](#python)
- [Dependencies](#dependencies)
- [`meson`](#meson)
@@ -32,17 +35,32 @@ To build and test a parallel version of the program, first read the instructions
- [Building](#building)
- [Testing](#testing)
- [Configuring a test environment](#configuring-a-test-environment)
- - [Building development binaries](#building-development-binaries)
- - [Rebuilding and installing release binaries](#rebuilding-and-installing-release-binaries)
- - [Updating `flopy` plugins](#updating-flopy-plugins)
- - [External model repositories](#external-model-repositories)
- - [Installing external repos](#installing-external-repos)
- - [Test models](#test-models)
- - [Example models](#example-models)
- - [Running Tests](#running-tests)
- - [Selecting tests with markers](#selecting-tests-with-markers)
- - [External model tests](#external-model-tests)
- - [Writing tests](#writing-tests)
+ - [Configuring unit tests](#configuring-unit-tests)
+ - [Configuring integration tests](#configuring-integration-tests)
+ - [Rebuilding release binaries](#rebuilding-release-binaries)
+ - [Updating FloPy packages](#updating-flopy-packages)
+ - [Installing external models](#installing-external-models)
+ - [Running tests](#running-tests)
+ - [Running unit tests](#running-unit-tests)
+ - [Running integration tests](#running-integration-tests)
+ - [Selecting tests with markers](#selecting-tests-with-markers)
+ - [Writing tests](#writing-tests)
+ - [Writing unit tests](#writing-unit-tests)
+ - [Writing integration tests](#writing-integration-tests)
+- [Generating makefiles](#generating-makefiles)
+ - [Updating extra and excluded files](#updating-extra-and-excluded-files)
+ - [Testing makefiles](#testing-makefiles)
+ - [Installing `make` on Windows](#installing-make-on-windows)
+ - [Using Conda from Git Bash](#using-conda-from-git-bash)
+- [Branching model](#branching-model)
+ - [Overview](#overview)
+ - [Managing long-lived branches](#managing-long-lived-branches)
+ - [Backup](#backup)
+ - [Squash](#squash)
+ - [Rebase](#rebase)
+ - [Cleanup](#cleanup)
+- [Deprecation policy](#deprecation-policy)
+ - [Finding deprecations](#finding-deprecations)
@@ -79,8 +97,16 @@ GNU Fortran can be installed on all three major platforms.
##### macOS
-- [Homebrew](https://brew.sh/): `brew install gcc`
-- [MacPorts](https://www.macports.org/): `sudo port install gcc10`
+- [Homebrew](https://brew.sh/): `brew install gcc@13`
+- [MacPorts](https://www.macports.org/): `sudo port install gcc13`
+
+**Note:** Xcode 15 includes a new linker implementation which breaks GNU Fortran compatibility. A workaround is to set `LDFLAGS` to use the classic linker, for instance:
+
+```shell
+export LDFLAGS="$LDFLAGS -Wl,-ld_classic"
+```
+
+See [this ticket](https://github.com/mesonbuild/meson/issues/12282) on the Meson repository for more information.
##### Windows
@@ -96,10 +122,12 @@ GNU Fortran can be installed on all three major platforms.
#### Intel Fortran
-Intel Fortran can also be used to compile MODFLOW 6 and associated utilities. The `ifort` compiler is available in the [Intel oneAPI HPC Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/hpc-toolkit/download.html). An installer is bundled with the download.
+Intel Fortran can also be used to compile MODFLOW 6 and associated utilities. The `ifort` and `ifx` compilers are available in the [Intel oneAPI HPC Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/hpc-toolkit/download.html).
A number of environment variables must be set before using Intel Fortran. General information can be found [here](https://www.intel.com/content/www/us/en/develop/documentation/oneapi-programming-guide/top/oneapi-development-environment-setup.html), with specific instructions to configure a shell session for `ifort` [here](https://www.intel.com/content/www/us/en/develop/documentation/fortran-compiler-oneapi-dev-guide-and-reference/top/compiler-setup/use-the-command-line/specifying-the-location-of-compiler-components.html).
+While the current development version of MODFLOW 6 is broadly compatible with `ifort`, `ifx` compatibility is still limited on Ubuntu and Windows, and `ifx` is not supported on macOS.
+
##### Windows
On Windows, [Visual Studio](https://visualstudio.microsoft.com) and a number of libraries must be installed for `ifort` to work. The required libraries can be installed by ticking the "Desktop Development with C++" checkbox in the Visual Studio Installer's Workloads tab.
@@ -110,6 +138,36 @@ On Windows, [Visual Studio](https://visualstudio.microsoft.com) and a number of
cmd.exe "/K" '"C:\Program Files (x86)\Intel\oneAPI\setvars-vcvarsall.bat" && "C:\Program Files (x86)\Intel\oneAPI\compiler\latest\env\vars.bat" && powershell'
```
+#### Compiler compatibility
+
+The following tables are automatically generated by [a CI workflow](.github/workflows/compilers.yml).
+
+##### Compile
+
+
+| runner | gcc 10 | gcc 11 | gcc 12 | gcc 13 | gcc 7 | gcc 8 | gcc 9 | intel-classic 2021.1 | intel-classic 2021.10 | intel-classic 2021.2 | intel-classic 2021.3 | intel-classic 2021.4 | intel-classic 2021.5 | intel-classic 2021.6 | intel-classic 2021.7 | intel-classic 2021.8 | intel-classic 2021.9 | intel 2021.1 | intel 2021.2 | intel 2021.4 | intel 2022.0 | intel 2022.1 | intel 2022.2.1 | intel 2022.2 | intel 2023.0 | intel 2023.1 | intel 2023.2 |
+|:-------------|:----------------|:----------------|:----------------|:----------------|:---------------|:---------------|:---------------|:------------------------------|:-------------------------------|:------------------------------|:------------------------------|:------------------------------|:------------------------------|:------------------------------|:------------------------------|:------------------------------|:------------------------------|----------------------:|----------------------:|----------------------:|----------------------:|----------------------:|:------------------------|:----------------------|----------------------:|----------------------:|:----------------------|
+| macos-11 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | | | | | | | |
+| macos-12 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | | | | | | | |
+| ubuntu-20.04 | ✓ | ✓ | | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | | | ✓ | ✓ | | | ✓ |
+| ubuntu-22.04 | ✓ | ✓ | ✓ | ✓ | | | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | | | ✓ | ✓ | | | ✓ |
+| windows-2019 | ✓ | ✓ | ✓ | ✓ | | | ✓ | | ✓ | | | | | ✓ | ✓ | ✓ | ✓ | | | | | | | ✓ | | | ✓ |
+| windows-2022 | ✓ | ✓ | ✓ | ✓ | | | ✓ | | ✓ | | | | | ✓ | ✓ | ✓ | ✓ | | | | | | | ✓ | | | ✓ |
+
+
+##### Test
+
+
+| runner | gcc 10 | gcc 11 | gcc 12 | gcc 13 | gcc 7 | gcc 8 | gcc 9 | intel-classic 2021.1 | intel-classic 2021.10 | intel-classic 2021.2 | intel-classic 2021.3 | intel-classic 2021.4 | intel-classic 2021.5 | intel-classic 2021.6 | intel-classic 2021.7 | intel-classic 2021.8 | intel-classic 2021.9 | intel 2021.1 | intel 2021.2 | intel 2021.4 | intel 2022.0 | intel 2022.1 | intel 2022.2.1 | intel 2022.2 | intel 2023.0 | intel 2023.1 | intel 2023.2 |
+|:-------------|:----------------|:----------------|:----------------|:----------------|:---------------|:---------------|:---------------|:------------------------------|-------------------------------:|:------------------------------|:------------------------------|:------------------------------|:------------------------------|:------------------------------|:------------------------------|------------------------------:|------------------------------:|----------------------:|----------------------:|----------------------:|----------------------:|----------------------:|------------------------:|----------------------:|----------------------:|----------------------:|----------------------:|
+| macos-11 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | | | | | | | | | |
+| macos-12 | ✓ | ✓ | ✓ | ✓ | | | | ✓ | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | | | | | | | | | |
+| ubuntu-20.04 | ✓ | ✓ | | | ✓ | ✓ | ✓ | ✓ | | ✓ | | ✓ | ✓ | ✓ | ✓ | | | | | | | | | | | | |
+| ubuntu-22.04 | ✓ | ✓ | ✓ | ✓ | | | ✓ | ✓ | | ✓ | | ✓ | ✓ | ✓ | ✓ | | | | | | | | | | | | |
+| windows-2019 | | | | ✓ | | | | | | | | | | ✓ | ✓ | | | | | | | | | | | | |
+| windows-2022 | ✓ | ✓ | ✓ | ✓ | | | ✓ | | | | | | | ✓ | ✓ | | | | | | | | | | | | |
+
+
### Python
Python 3.8+ is required to run MODFLOW 6 tests. A Conda distribution (e.g. [miniconda](https://docs.conda.io/en/latest/miniconda.html) or [Anaconda](https://www.anaconda.com/products/individual)) is recommended. Python dependencies are specified in `environment.yml`. To create an environment, run from the project root:
@@ -141,7 +199,7 @@ These are each described briefly below. The Conda `environment.yml` contains a n
##### `fprettify`
-[`fprettify`](https://github.com/pseewald/fprettify) can be used to format Fortran source code and in combination with the [MODFLOW 6 fprettify configuration](https://github.com/MODFLOW-USGS/modflow6/blob/develop/distribution/.fprettify.yaml) establishes a contribution standard for properly formatted MODFLOW 6 Fortran source. This tool can be installed with `pip` or `conda` and used from the command line or integrated with a [VSCode](https://github.com/MODFLOW-USGS/modflow6/blob/develop/.vscode/README.md) or Visual Studio development environment. The `fprettify` package is included in the Conda environment in `environment.yml`. See [contribution guidelines](https://github.com/MODFLOW-USGS/modflow6/blob/develop/CONTRIBUTING.md) for additional information.
+[`fprettify`](https://github.com/pseewald/fprettify) can be used to format Fortran source code and in combination with the [MODFLOW 6 fprettify configuration](.fprettify.yaml) establishes a contribution standard for properly formatted MODFLOW 6 Fortran source. This tool can be installed with `pip` or `conda` and used from the command line or integrated with a [VSCode](.vscode/README.md) or Visual Studio development environment. The `fprettify` package is included in the Conda environment in `environment.yml`. See [contribution guidelines](CONTRIBUTING.md) for additional information.
##### `mfpymake`
@@ -151,7 +209,7 @@ The `mfpymake` package can build MODFLOW 6 and related programs and artifacts (e
[`flopy`](https://github.com/modflowpy/flopy) is used throughout MODFLOW 6 tests to create, run and post-process models.
-Like MODFLOW 6, `flopy` is modular — for each MODFLOW 6 package there is generally a corresponding `flopy` plugin. Plugins are generated dynamically from DFN files stored in this repository under `doc/mf6io/mf6ivar/dfn`.
+Like MODFLOW 6, `flopy` is modular — for each MODFLOW 6 package there is generally a corresponding `flopy` package. Packages are generated dynamically from DFN files stored in this repository under `doc/mf6io/mf6ivar/dfn`.
##### `modflow-devtools`
@@ -198,11 +256,12 @@ git remote add upstream https://github.com/MODFLOW-USGS/modflow6.git
Meson is the recommended build tool for MODFLOW 6. [Meson](https://mesonbuild.com/Getting-meson.html) must be installed and on your [PATH](https://en.wikipedia.org/wiki/PATH_(variable)). Creating and activating the Conda environment `environment.yml` should be sufficient for this.
-Meson build configuration files are provided for MODFLOW 6 as well as `zbud6` and `mf5to6` utility programs:
+Meson build configuration files are provided for MODFLOW 6, for the ZONEBUDGET and MODFLOW 2005 to 6 converter utility programs, and for Fortran unit tests (see the [Testing](#testing) section below).
- `meson.build`
- `utils/zonebudget/meson.build`
- `utils/mf5to6/meson.build`
+- `autotest/meson.build`
To build MODFLOW 6, first configure the build directory. By default Meson uses compiler flags for a release build. To create a debug build, add `-Doptimization=0` to the following `setup` command.
@@ -232,45 +291,58 @@ The binaries can then be found in the `bin` folder. `meson install` also trigger
## Testing
-MODFLOW 6 tests are driven with [`pytest`](https://docs.pytest.org/en/7.1.x/), with the help of plugins like `pytest-xdist` and `pytest-cases`. Testing dependencies are included in the Conda environment `environment.yml`.
+MODFLOW 6 unit tests are written in Fortran with [`test-drive`](https://github.com/fortran-lang/test-drive).
+
+MODFLOW 6 integration tests are written in Python with [`pytest`](https://docs.pytest.org/en/7.1.x/). Integration testing dependencies are included in the Conda environment `environment.yml`.
**Note:** the entire test suite should pass before a pull request is submitted. Tests run in GitHub Actions CI and a PR can only be merged with passing tests. See [`CONTRIBUTING.md`](CONTRIBUTING.md) for more information.
### Configuring a test environment
-A few tasks must be completed before running tests:
+Before running tests, there are a few steps to complete. Most importantly, the local development version of MODFLOW 6 must be built, e.g. with Meson as described above.
-- build local MODFLOW 6 development version
-- rebuild the last MODFLOW 6 release
-- install additional executables
-- update FloPy packages and plugins
-- clone MODFLOW 6 test model and example repositories
+The `autotest/build_exes.py` script is provided as a shortcut to rebuild local binaries. It can be invoked as a standard Python script or with Pytest. By default, binaries are placed in the `bin` directory relative to the project root, as in the Meson commands described above. To change the location of the binaries, use the `--path` option.
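+
+For example, either of the following rebuilds the local development binaries into the default `bin` directory:
+
+```shell
+# run as a plain Python script from the project root
+python autotest/build_exes.py
+# or with pytest from the autotest directory
+pytest build_exes.py
+```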
-Tests expect binaries to live in the `bin` directory relative to the project root, as configured above in the `meson` commands. Binaries are organized as follows:
+#### Configuring unit tests
-- local development binaries in the top-level `bin` folder
-- binaries rebuilt in development mode from the latest release in `bin/rebuilt`
-- related programs installed from the [executables distribution](https://github.com/MODFLOW-USGS/executables/releases) live in `bin/downloaded`
+Unit tests are [driven with Meson](https://mesonbuild.com/Unit-tests.html). A small number of Meson-native tests are defined in the top-level `meson.build` file to check that MODFLOW 6 has installed successfully. These require no additional configuration.
-Tests must be run from the `autotest` folder.
+Additional Fortran unit tests are defined with [`test-drive`](https://github.com/fortran-lang/test-drive) in the `autotest/` folder, with test files named `Test*.f90`. If Meson fails to find the `test-drive` library via `pkg-config`, these will be skipped.
-#### Building development binaries
+To install `test-drive`:
-Before running tests, the local development version of MODFLOW 6 must be built with `meson` as described above. The `autotest/build_exes.py` script is provided as a shortcut to easily rebuild local binaries. The script can be run from the project root with:
+1. Clone the `test-drive` repository
+2. Set up and build with Meson, e.g. in a Unix shell from the `test-drive` project root:
```shell
-python autotest/build_exes.py
+meson setup builddir --prefix=$PWD --libdir=lib
+meson install -C builddir
```
-Alternatively, it can be run from the `autotest` directory with `pytest`:
+3. Add `<test-drive root>/lib/pkgconfig` to the `PKG_CONFIG_PATH` environment variable (see the sketch after this list).
+4. To confirm that `test-drive` is detected by `pkg-config`, run `pkg-config --libs test-drive`.
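+
+For example, in a Unix shell from the `test-drive` project root (a sketch assuming the `--prefix=$PWD --libdir=lib` install above):
+
+```shell
+export PKG_CONFIG_PATH=$PWD/lib/pkgconfig:$PKG_CONFIG_PATH
+pkg-config --libs test-drive
+```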
-```shell
-pytest build_exes.py
-```
+Meson should now detect the `test-drive` library when building MODFLOW 6.
+
+**Note:** the `test-drive` source code is not yet compatible with recent versions of Intel Fortran; building with `gfortran` is recommended.
-By default, binaries will be placed in the `bin` directory relative to the project root, as in the `meson` commands described above. To change the location of the binaries, use the `--path` option.
+See the [Running unit tests](#running-unit-tests) section for instructions on running unit tests.
-#### Rebuilding and installing release binaries
+#### Configuring integration tests
+
+A few more tasks must be completed before integration testing:
+
+- install MODFLOW-related executables
+- ensure FloPy packages are up to date
+- install MODFLOW 6 example/test models
+
+As mentioned above, binaries live in the `bin` subdirectory of the project root. This directory is organized as follows:
+
+- local development binaries in the top-level `bin`
+- binaries rebuilt in development mode from the latest MODFLOW 6 release in `bin/rebuilt/`
+- related programs installed from the [executables distribution](https://github.com/MODFLOW-USGS/executables/releases) in `bin/downloaded/`
+
+##### Rebuilding release binaries
Tests require the latest official MODFLOW 6 release to be compiled in develop mode with the same Fortran compiler as the development version. A number of binaries distributed from the [executables repo](https://github.com/MODFLOW-USGS/executables) must also be installed. The script `autotest/get_exes.py` does both of these things. It can be run from the project root with:
@@ -284,70 +356,56 @@ Alternatively, with `pytest` from the `autotest` directory:
pytest get_exes.py
```
-By default, binaries will be placed in the `bin` directory relative to the project root, as in the `meson` commands described above. Nested `bin/downloaded` and `bin/rebuilt` directories are created to contain the rebuilt last release and the downloaded executables, respectively. To change the location of the binaries, use the `--path` option.
+As above, binaries are placed in the `bin` subdirectory of the project root, with nested `bin/downloaded` and `bin/rebuilt` subdirectories containing the downloaded executables and the rebuilt latest release, respectively.
-#### Updating `flopy` plugins
+##### Updating FloPy packages
-Plugins should be regenerated from DFN files before running tests for the first time or after definition files change. This can be done with the `autotest/update_flopy.py` script, which wipes and regenerates plugin classes for the `flopy` installed in the Python environment.
+FloPy packages should be regenerated from DFN files before running tests for the first time or after definition files change. This can be done with the `autotest/update_flopy.py` script, which wipes and regenerates package classes for the FloPy installed in the Python environment.
-**Note:** if you've installed a local version of `flopy` from source, running this script can overwrite files in your repository.
+**Note:** if you've installed an editable local version of FloPy from source, running this script can overwrite files in your repository.
-There is a single optional argument, the path to the folder containing definition files. By default DFN files are assumed to live in `doc/mf6io/mf6ivar/dfn`, making the following identical:
+There is a single optional argument, the path to the folder containing definition files. By default DFN files are assumed to live in `doc/mf6io/mf6ivar/dfn`, making the following functionally identical:
```shell
python autotest/update_flopy.py
python autotest/update_flopy.py doc/mf6io/mf6ivar/dfn
```
-#### External model repositories
+##### Installing external models
-Some autotests load example models from external repositories:
+Some autotests load models from external repositories:
- [`MODFLOW-USGS/modflow6-testmodels`](https://github.com/MODFLOW-USGS/modflow6-testmodels)
- [`MODFLOW-USGS/modflow6-largetestmodels`](https://github.com/MODFLOW-USGS/modflow6-largetestmodels)
- [`MODFLOW-USGS/modflow6-examples`](https://github.com/MODFLOW-USGS/modflow6-examples)
-#### Installing external repos
-
-By default, the tests expect these repositories side-by-side with (i.e. in the same parent directory as) the `modflow6` repository. If the repos are somewhere else, you can set the `REPOS_PATH` environment variable to point to their parent directory. If external model repositories are not found, tests requiring them will be skipped.
-
-**Note:** a convenient way to persist environment variables needed for tests is to store them in a `.env` file in the `autotest` folder. Each variable should be defined on a separate line, with format `KEY=VALUE`. The `pytest-dotenv` plugin will then automatically load any variables found in this file into the test process' environment.
-
-##### Test models
+See the [MODFLOW devtools documentation](https://modflow-devtools.readthedocs.io/en/latest/md/install.html#installing-external-model-repositories) for instructions to install external model repositories.
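+
+For example, the model repositories can be cloned side by side with the `modflow6` repository (a sketch; see the devtools documentation above for details):
+
+```shell
+git clone https://github.com/MODFLOW-USGS/modflow6-testmodels ../modflow6-testmodels
+git clone https://github.com/MODFLOW-USGS/modflow6-largetestmodels ../modflow6-largetestmodels
+git clone https://github.com/MODFLOW-USGS/modflow6-examples ../modflow6-examples
+```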
-The test model repos can simply be cloned — ideally, into the parent directory of the `modflow6` repository, so that repositories live side-by-side:
+### Running tests
-```shell
-git clone MODFLOW-USGS/modflow6-testmodels
-git clone MODFLOW-USGS/modflow6-largetestmodels
-```
+MODFLOW 6 has two kinds of tests: Fortran unit tests, driven with Meson, and Python integration tests, driven with Pytest.
-##### Example models
+#### Running unit tests
-First clone the example models repo:
+Unit tests must be run from the project root. To run unit tests in verbose mode:
```shell
-git clone MODFLOW-USGS/modflow6-examples
+meson test -C builddir --no-rebuild --verbose
```
-The example models require some setup after cloning. Some extra Python dependencies are required to build the examples:
-
-```shell
-cd modflow6-examples/etc
-pip install -r requirements.pip.txt
-```
+Without the `--no-rebuild` option, Meson will rebuild the project before running tests.
-Then, still from the `etc` folder, run:
+Unit tests can be selected by module name (as listed in `autotest/tester.f90`). For instance, to test the `ArrayHandlersModule`:
```shell
-python ci_build_files.py
+meson test -C builddir --no-rebuild --verbose ArrayHandlers
```
-This will build the examples for subsequent use by the tests.
+To run a test module in the `gdb` debugger, just add the `--gdb` flag to the test command.
-### Running Tests
+#### Running integration tests
-Tests are driven by `pytest` and must be run from the `autotest` folder. To run tests in a particular file, showing verbose output, use:
+Integration tests must be run from the `autotest/` folder. To run tests in a particular file, showing verbose output, use:
```shell
pytest -v
@@ -359,7 +417,7 @@ Tests can be run in parallel with the `-n` option, which accepts an integer argu
pytest -v -n auto
```
-#### Selecting tests with markers
+##### Selecting tests with markers
Markers can be used to select subsets of tests. Markers provided in `pytest.ini` include:
@@ -382,8 +440,6 @@ pytest -v -n auto -S
[Smoke testing](https://modflow-devtools.readthedocs.io/en/latest/md/markers.html#smoke-testing) is a form of integration testing which aims to test a decent fraction of the codebase quickly enough to run often during development.
-#### External model tests
-
Tests using models from external repositories can be selected with the `repo` marker:
```shell
@@ -396,27 +452,64 @@ The `large` marker is a subset of the `repo` marker. To test models excluded fro
pytest -v -n auto -m "large"
```
-Test scripts for external model repositories can also be run independently:
+Tests load external models from fixtures provided by `modflow-devtools`. External model tests can be selected by model or simulation name, or by packages used. See the [`modflow-devtools` documentation](https://modflow-devtools.readthedocs.io/en/latest/md/fixtures.html#filtering) for usage examples. Note that filtering options only apply to tests using external models, and will not filter tests defining models in code — for that, the `pytest` built-in `-k` option may be used.
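+
+For example, to select tests that define models in code by name with pytest's built-in `-k` option (the substring below is purely illustrative):
+
+```shell
+pytest -v -n auto -k "gwf"
+```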
-```shell
-# MODFLOW 6 test models
-pytest -v -n auto test_z01_testmodels_mf6.py
+### Writing tests
+
+#### Writing unit tests
+
+To add a new unit test:
+
+- Add a file containing a test module, e.g. `TestArithmetic.f90`, to the `autotest/` folder.
+
+```fortran
+module TestArithmetic
+ use testdrive, only : error_type, unittest_type, new_unittest, check, test_failed
+ implicit none
+ private
+ public :: collect_arithmetic
+contains
+
+ subroutine collect_arithmetic(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [new_unittest("add", test_add)]
+ end subroutine collect_arithmetic
+
+ subroutine test_add(error)
+ type(error_type), allocatable, intent(out) :: error
+ call check(error, 1 + 1 == 2, "Math works")
+ if (allocated(error)) then
+ call test_failed(error, "Math is broken")
+ return
+ end if
+ end subroutine test_add
+end module TestArithmetic
+```
+
+- Add the module name to the list of `tests` in `autotest/meson.build`, omitting the leading "Test".
-# MODFLOW 5 to 6 conversion test models
-pytest -v -n auto test_z02_testmodels_mf5to6.py
+```meson
+tests = [
+ 'Arithmetic',
+]
+```
-# models from modflow6-examples repo
-pytest -v -n auto test_z03_examples.py
+- Add a `use` statement for the test module in `autotest/tester.f90`, and add it to the array of `testsuites`.
-# models from modflow6-largetestmodels repo
-pytest -v -n auto test_z03_largetestmodels.py
+```fortran
+use TestArithmetic, only: collect_arithmetic
+...
+testsuites = [ &
+ new_testsuite("Arithmetic", collect_arithmetic), &
+ new_testsuite("something_else", collect_something_else) &
+]
```
-Tests load external models from fixtures provided by `modflow-devtools`. External model tests can be selected by model or simulation name, or by packages used. See the [`modflow-devtools` documentation](https://modflow-devtools.readthedocs.io/en/latest/md/fixtures.html#filtering) for usage examples. Note that filtering options only apply to tests using external models, and will not filter tests defining models in code — for that, the `pytest` built-in `-k` option may be used.
+- Rebuild with Meson from the project root, e.g. `meson install -C builddir`. The test should now be picked up when `meson test...` is next invoked.
-#### Writing tests
+#### Writing integration tests
-Tests should ideally follow a few conventions for easier maintenance:
+Integration tests should ideally follow a few conventions for easier maintenance:
- Use temporary directory fixtures. Tests which write to disk should use `pytest`'s built-in `tmp_path` fixtures or one of the [keepable temporary directory fixtures from `modflow-devtools`](https://modflow-devtools.readthedocs.io/en/latest/md/fixtures.html#keepable-temporary-directories). This prevents tests from polluting one another's state.
@@ -426,3 +519,199 @@ Tests should ideally follow a few conventions for easier maintenance:
- `@pytest.mark.regression` if the test compares results from different versions
**Note:** If all three external model repositories are not installed as described above, some tests will be skipped. The full test suite includes >750 cases. All must pass before changes can be merged into this repository.
+
+##### Test framework
+
+A framework has been developed to streamline common testing patterns. The [`TestFramework`](autotest/framework.py) class, defined in `autotest/framework.py`, is used by most test scripts to configure, run and evaluate one or more MF6 simulations, optionally in comparison with another simulation or model.
+
+Generally, the recommended pattern for a test script is:
+
+```python
+import ...
+
+cases = ["a", "b", ...]
+variable = [1., 0., ...]
+expected = [-1., -1.1, ...]
+
+def build_models(idx, test):
+ v = variable[idx]
+ ...
+
+def check_output(idx, test):
+ e = expected[idx]
+ ...
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare=None,
+ )
+ test.run()
+```
+
+The framework has two hooks:
+
+- `build`: construct one or more MF6 simulations and/or non-MF6 models with FloPy
+- `check`: evaluate simulation/model output
+
+A test script conventionally contains one or more test cases, fed to the test function as `idx, name` pairs. `idx` can be used to index parameter values or expected results for a specific test case. The test case `name` is useful for model/subdirectory naming, etc.
+
+The framework will not run an unknown program. The path to any program under test (or used for a comparison) must be registered in the `targets` dictionary. Keys are strings. See `autotest/conftest.py` for the contents of `targets` — naming follows the [executables distribution](https://github.com/MODFLOW-USGS/executables).
+
+The `.run()` function:
+
+1. builds simulations/models
+2. runs simulations/models
+3. compares simulation/model outputs
+4. checks outputs against expectations
+
+A `compare` parameter may be provided on initialization, which enables comparison of outputs against another program or the latest official release of MF6. The following values are supported:
+
+- `None`: disables comparison — the test simply runs/evaluates any registered simulations/models without comparing results
+- `auto`: attempt to detect the comparison type from contents of test workspace, otherwise skipping comparison
+- `mf6_regression`: compare results against the latest official release rebuilt in develop mode
+- `mf6`, `mf2005`, `mfnwt`, or `mflgr`: compare with results from the selected program — a corresponding model must be provided in `build_models()`
+
+After running the reference and comparison models, the framework will try to find correspondingly named output files to compare — comparison logic may need adjustment when writing tests for new packages or models.
+
+## Generating makefiles
+
+Run `build_makefiles.py` in the `distribution/` directory after adding, removing, or renaming source files. This script uses [Pymake](https://github.com/modflowpy/pymake) to regenerate makefiles. For instance:
+
+```shell
+python build_makefiles.py
+```
+
+### Updating extra and excluded files
+
+If the utilities located in the `utils` directory (e.g., `mf5to6` and `zbud6`) are affected by changes to the modflow6 `src/` directory (such as new or refactored source files), then the new module source file should also be added to the utility's `extrafiles.txt` file (`utils/<utility>/pymake/extrafiles.txt`). This file informs Pymake of source files living outside the main source directory, so they can be included in generated makefiles.
+
+Module dependencies for features still under development should be added to `excludefiles.txt`. Source files listed in this file will be excluded from makefiles generated by Pymake. Makefiles should only include the source files needed to build officially released/supported features.
+
+### Testing makefiles
+
+Makefile generation and usage can be tested from the `distribution` directory by running the `build_makefiles.py` script with Pytest:
+
+```shell
+pytest -v build_makefiles.py
+```
+
+**Note**: `make` is required to test compiling MODFLOW 6 with makefiles. If `make` is not discovered on the system path, compile tests will be skipped.
+
+Makefiles may also be tested manually by changing to the appropriate `make` subdirectory (of the project root for MODFLOW 6, or inside the corresponding `utils` subdirectory for the zonebudget or converter utilities) and invoking `make` (`make clean` may first be necessary to remove previously created object files).
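+
+For example, to test the MODFLOW 6 makefile from the project root:
+
+```shell
+cd make
+make clean
+make
+```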
+
+### Installing `make` on Windows
+
+On Windows, it is recommended to generate and test makefiles from a Unix-like shell rather than PowerShell or Command Prompt. Make can be installed via [Conda](https://anaconda.org/conda-forge/make) or [Chocolatey](https://community.chocolatey.org/packages/make). Alternatively, it is included with [mingw](https://sourceforge.net/projects/mingw/), which is also available from [Chocolatey](https://community.chocolatey.org/packages/mingw).
+
+#### Using Conda from Git Bash
+
+To use Conda from Git Bash on Windows, first run the `conda.sh` script located in your Conda installation's `/etc/profile.d` subdirectory. For instance, with Anaconda3:
+
+```shell
+. /c/Anaconda3/etc/profile.d/conda.sh
+```
+
+Or Miniconda3:
+
+```shell
+. /c/ProgramData/miniconda3/etc/profile.d/conda.sh
+```
+
+After this, `conda` commands should be available.
+
+This command may be added to a `.bashrc` or `.bash_profile` file in your home directory to permanently configure Git Bash for Conda.
+
+## Branching model
+
+This section documents MODFLOW 6 branching strategy and other VCS-related procedures.
+
+### Overview
+
+This project follows the [git flow](https://nvie.com/posts/a-successful-git-branching-model/): development occurs on the `develop` branch, while `master` is reserved for the state of the latest release. Development PRs are typically squashed to `develop` to avoid merge commits. At release time, release branches are merged to `master`, and then `master` is merged back into `develop`.
+
+### Managing long-lived branches
+
+When a feature branch takes a long time to develop, it can easily fall out of sync with the develop branch. Depending on the situation, it may be advisable to periodically squash the commits on the feature branch and rebase the change set with develop. The following approach for updating a long-lived feature branch has proven robust.
+
+In the example below, the feature branch is assumed to be called `feat-xyz`.
+
+#### Backup
+
+Begin by creating a backup copy of the feature branch in case anything goes terribly wrong.
+
+```
+git checkout feat-xyz
+git checkout -b feat-xyz-backup
+git checkout feat-xyz
+```
+
+#### Squash
+
+Next, consider squashing commits on the feature branch. If there are many commits, it is beneficial to squash them before trying to rebase with develop. There is a nice article on [squashing commits into one using git](https://www.internalpointers.com/post/squash-commits-into-one-git), which has been very useful for consolidating commits on a long-lived modflow6 feature branch.
+
+A quick and dirty way to squash without interactive rebase (as an alternative to the approach described in the article mentioned in the preceding paragraph) is a soft reset to the oldest commit on the feature branch, followed by an amended commit. Making a backup of the feature branch beforehand is strongly recommended, as accidentally typing `--hard` instead of `--soft` will wipe out all your work.
+
+```
+git reset --soft <oldest-feature-commit>
+git commit --amend -m "consolidated commit message"
+```
+
+Once the commits on the feature branch have been consolidated, a force push to origin is recommended. This is not strictly required, but it can serve as an intermediate backup/checkpoint so the squashed branch state can be retrieved if rebasing fails. The following command will push `feat-xyz` to origin.
+
+```
+git push origin feat-xyz --force
+```
+
+The `--force` flag's short form is `-f`.
+
+#### Rebase
+
+Now that the commits on `feat-xyz` have been consolidated, it is time to rebase with develop. If there are multiple commits in `feat-xyz` that make changes, undo them, rename files, and/or move things around in subsequent commits, then there may be multiple sets of merge conflicts that will need to be resolved as the rebase works its way through the commit change sets. This is why it is beneficial to squash the feature commits before rebasing with develop.
+
+To rebase with develop, make sure the feature branch is checked out and then type:
+
+```
+git rebase develop
+```
+
+If anything goes wrong during a rebase, `git rebase --abort` can be used to unwind it.
+
+If there are merge conflicts, they will need to be resolved before going forward. Once any conflicts are resolved, it may be worthwhile to rebuild the MODFLOW 6 program and run the smoke tests to ensure nothing is broken.
+
+At this point, you will want to force push the updated feature branch to origin using the same force push command as before.
+
+```
+git push origin feat-xyz --force
+```
+
+#### Cleanup
+
+Lastly, if you are satisfied with the results and confident the procedure went well, then you can delete the backup that you created at the start.
+
+```
+git branch -d feat-xyz-backup
+```
+
+This process can be repeated periodically to stay in sync with the develop branch and keep a clean commit history.
+
+## Deprecation policy
+
+To deprecate a MODFLOW 6 input/output option in a DFN file:
+
+- Add a new `deprecated x.y.z` attribute to the appropriate variable in the package DFN file, where `x.y.z` is the version the deprecation is introduced. Mention the deprecation prominently in the release notes.
+- If support for the deprecated option is removed (typically after at least 2 minor or major releases or 1 year), add a new `removed x.y.z` attribute to the variable in the DFN file, where `x.y.z` is the version in which support for the option was removed. The line containing `deprecated x.y.z` should not be deleted. Mention the removal prominently in the release notes.
+- Deprecated/removed attributes are not removed from DFN files but remain in perpetuity. The `doc/mf6io/mf6ivar/deprecations.py` script generates a markdown deprecation table which is converted to LaTeX by `doc/ReleaseNotes/mk_deprecations.py` for inclusion in the MODFLOW 6 release notes. Deprecations and removals should still be mentioned separately in the release notes, however.
+
+### Finding deprecations
+
+To search for deprecations and removals in DFN files on a system with `git` and standard Unix commands available:
+
+```shell
+git grep 'deprecated' -- '*.dfn' | awk '/\.dfn:deprecated/'
+```
diff --git a/PARALLEL.md b/PARALLEL.md
index 8229ae1cbbb..3d1e8f11ebc 100644
--- a/PARALLEL.md
+++ b/PARALLEL.md
@@ -5,7 +5,7 @@ This document describes how to set up your build environment for developing and
---
**DISCLAIMER**
-*Expectations on platform compatibility*
+*Expectations on platform compatibility*
The serial version of the MODFLOW 6 program has had no external dependencies and is traditionally available for a variety of platforms (Windows, GNU/linux, macOS) and compatible with the mainstream Fortran compilers (gfortran, ifort). The parallel version comes with dependencies on third party components, most notably the MPI and PETSc libraries. While the goal is a continued support of the above mentioned configurations, this has become more challenging and can generally not be guaranteed. To assist developers as well as end users who are planning to compile the code themselves, a list of successfully tested build configurations will be included in this document.
@@ -19,7 +19,7 @@ The design philosophy has been to maintain MODFLOW as a single codebase and have
## Prerequisites
-The parallel version of MODFLOW 6 requires the the Message Passing Interface (MPI) and the Portable, Extensible Toolkit for Scientific Computation (PETSc - pronounced PET-see (/ˈpɛt-siː/)) libraries.
+The parallel version of MODFLOW 6 requires the Message Passing Interface (MPI) and the Portable, Extensible Toolkit for Scientific Computation (PETSc - pronounced PET-see (/ˈpɛt-siː/)) libraries.
### MPI
@@ -36,7 +36,7 @@ In addition to compiling, the MPI toolset is also required to run a parallel sim
### PETSc
-The PETSc library is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations:
+The PETSc library is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations:
https://petsc.org/release/
@@ -44,38 +44,41 @@ The PETSc library (version 3.16 or higher) is used by MODFLOW for its parallel l
## Compiling MPI and PETSC from source
-The PETSc website gives details on a large number of configurations, depending on the target platform/OS, and many different ways to configure/make/install the library: https://petsc.org/release/install/. Building on Windows is notoriously challenging and discouraged by the PETSc development team. On Linux, however, PETSc can be installed (configure/make/install) by executing the following command
+The PETSc website gives details on a large number of configurations, depending on the target platform/OS, and many different ways to configure/make/install the library: https://petsc.org/release/install/. Building on Windows is notoriously challenging and discouraged by the PETSc development team. On Linux, however, PETSc can be installed (configure/make/install) by executing the following command
+
```
$ ./configure --download-openmpi --download-fblaslapack
$ make all
```
-in a terminal open in the root directory of your PETSc download
-
+in a terminal open in the root directory of your PETSc download
## Using a package manager to install MPI and PETSc
Use of a package manager can simplify the process of building the parallel version of MODFLOW 6.
### MacOS
+
[OpenMPI](https://formulae.brew.sh/formula/open-mpi) and [PETSc](https://formulae.brew.sh/formula/petsc) are available on Homebrew for Intel and Apple Silicon (M1). Both of these depend on [gcc 13.1.0](https://formulae.brew.sh/formula/gcc). [pkg-config](https://formulae.brew.sh/formula/pkg-config) should also be installed from Homebrew, if not already installed, so that Meson will be able to resolve the installation location of MPI and PETSc.
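+
+For example, using the formulae linked above (a sketch):
+
+```
+brew install open-mpi petsc pkg-config
+```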
### Ubuntu
-OpenMPI and PETSc are available for a variety of Ubuntu versions using the Advanced Packaging Tool (apt).
+
+OpenMPI and PETSc are available for a variety of Ubuntu versions using the Advanced Packaging Tool (apt).
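+
+For example (a sketch; the package names are assumptions and may differ between Ubuntu releases):
+
+```
+sudo apt install libopenmpi-dev petsc-dev
+```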
### Windows
-???
+Under evaluation.
## Using pkg-config to check your PETSc installation
-Eventually, the MODFLOW build process has to resolve the installation location of all external dependencies. The pkg-config tool (https://en.wikipedia.org/wiki/Pkg-config) can be used to take care of that.
+Eventually, the MODFLOW build process has to resolve the installation location of all external dependencies. The pkg-config tool (https://en.wikipedia.org/wiki/Pkg-config) can be used to take care of that.
```
pkg-config --libs petsc
```
If PETSc was built from source, you can check the contents of the folder
+
```
$PETSC_DIR/$PETSC_ARCH/lib/pkgconfig/
```
@@ -84,7 +87,6 @@ and confirm that there are one or more `*.pc` files in there. A similar `pkgconf
To connect everything, both of these folder paths have to be added to the `PKG_CONFIG_PATH` variable so that the `pkg-config` executable can resolve the installed libraries.
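+
+For example, for a PETSc source build (a sketch; extend the variable the same way for the compiler toolchain's own `pkgconfig` folder):
+
+```
+export PKG_CONFIG_PATH=$PETSC_DIR/$PETSC_ARCH/lib/pkgconfig:$PKG_CONFIG_PATH
+```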
-
## Building the parallel version of MODFLOW 6
The primary build system for MODFLOW is Meson (https://mesonbuild.com/). The `meson.build` script takes an additional argument to activate a parallel build of the software. E.g. for building and installing a parallel release version:
@@ -95,11 +97,12 @@ meson setup builddir -Ddebug=false -Dparallel=true \
meson install -C builddir
meson test --verbose --no-rebuild -C builddir
```
+
Note that changing the option flags in the `meson setup` command requires the flag `--reconfigure` to reconfigure the build directory. If the `PKG_CONFIG_PATH` was set as described above, the linking to PETSc and MPI is done automatically.
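+
+For example, to switch an existing build directory to a debug configuration (a sketch based on the setup command above):
+
+```
+meson setup builddir -Ddebug=true -Dparallel=true --reconfigure
+meson install -C builddir
+```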
-It's always a good idea to check your `mf6pro` executable to confirm that it is successfully linked against the external dependencies. You can use the command line tools `ldd` (Linux), `otool` (macOS), or `Dependencies.exe` (Windows, https://github.com/lucasg/Dependencies) to do that. In the list of dependencies, you should be able to identify `libpetsc` and `libmpi` for parallel builds.
+It's always a good idea to check your parallel MODFLOW executable to confirm that it is successfully linked against the external dependencies. You can use the command line tools `ldd` (Linux), `otool` (macOS), or `Dependencies.exe` (Windows, https://github.com/lucasg/Dependencies) to do that. In the list of dependencies, you should be able to identify `libpetsc` and `libmpi` for parallel builds.
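+
+For example, on Linux (a sketch; the path to the executable depends on your install prefix):
+
+```
+ldd ./bin/mf6 | grep -E 'petsc|mpi'
+```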
-The other build systems in the MODFLOW project (MS Visual Studio, `pymake`, `Makefile`) continue to be supported for *serial* builds only. `pymake` uses the `excludefiles.txt` to ignore those files that can only be build when MPI and PETSc are present on the system. In MS Visual Studio these same files are included in the solution but not in the build process.
+The other build systems in the MODFLOW project (MS Visual Studio, `pymake`, `Makefile`) continue to be supported for *serial* builds only. `pymake` uses `excludefiles.txt` to ignore those files that can only be built when MPI and PETSc are present on the system. In MS Visual Studio these same files are included in the solution but not in the build process.
---
@@ -111,8 +114,7 @@ Parallel MODFLOW was designed to have all third party functionality (MPI and PET
---
-
-## Testing the parallel of MODFLOW 6
+## Testing the parallel version of MODFLOW 6
Parallel MODFLOW can be tested using the same test framework as the serial program, with just a few modifications. To run a test inside the `autotest` folder in parallel mode, make sure to add a marker `@pytest.mark.parallel` so that the test is only executed in the Continuous Integration when running a configuration with a parallel build of MODFLOW.
@@ -121,6 +123,7 @@ The `TestSimulation` object that is being run from the framework should be confi
```
$ pytest -s --parallel test_par_gwf01.py
```
+
Running without the `--parallel` flag will simply skip the test.
## Debugging
@@ -130,11 +133,13 @@ The most straightforward way to debug a parallel simulation is to start a run an
```
-wait_dbg
```
+
telling MODFLOW to pause immediately after startup. This will give you time to attach one or multiple debuggers to the processes. Then start the parallel program, for example on two cores:
```
mpiexec -np 2 mf6 -p
```
+
In the process explorer you should now see 2 processes called `mf6` or `mf6.exe`. On the prompt where the command was executed, MODFLOW waits for input:
```
@@ -165,7 +170,8 @@ In VSCode parallel debugging is easiest done by duplicating the development envi
]
}
```
-After building parallel MODFLOW, press `Ctrl+Shift+p` to execute *Workspaces: Duplicate As Workspace in New Window*. This will open a second VSCode window, identical to the first. Starting the debug process and selecting *"Attach to ..."* pop ups a process selection window with the processes started from the `mpiexec` command described above. Select both, each from their own instance of the VSCode program. Now you can put breakpoints in the code, "Hit enter to continue" on the command prompt, and step through the parallel processes side-by-side.
+
+After building parallel MODFLOW, press `Ctrl+Shift+p` to execute *Workspaces: Duplicate As Workspace in New Window*. This will open a second VSCode window, identical to the first. Starting the debug process and selecting *"Attach to ..."* opens a process selection window with the processes started from the `mpiexec` command described above. Select both, each from their own instance of the VSCode program. Now you can put breakpoints in the code, "Hit enter to continue" on the command prompt, and step through the parallel processes side-by-side.
---
**TIP**
@@ -174,19 +180,23 @@ Make sure that you work with gdb versions >= 10. We have found that earlier vers
---
-
## Compatibility
Parallel MODFLOW has been built successfully with the following configurations:
-| Operating System | Toolchain | MPI | PETSc | Package Manager |
-|-----------------------|-------------|---------------|--------|-----------------|
-| MS Windows | ? | ? | ? | NA |
-| WSL2 (Ubuntu 20.04.5) | gcc 9.4.0 | OpenMPI 4.0.3 | 3.18.2 | NA |
-| Ubuntu 22.04 | gcc 9.5.0 | OpenMPI 4.1.4 | 3.18.5 | NA |
-| Ubuntu 23.04 | gcc 13 | OpenMPI 4.1.4 | 3.18.1 | apt |
-| macOS 12.6.3 | gcc 9.5.0 | OpenMPI 4.1.4 | 3.18.5 | NA |
-| macOS 12.6.6 | gcc 13.1.0 | OpenMPI 4.1.5 | 3.19.1 | Homebrew |
+| Operating System | Toolchain | MPI | PETSc | Package Manager |
+|-------------------------------------|---------------------------|-------------------|---------------------|-----------------|
+| MS Windows | ? | ? | ? | NA |
+| WSL2 (Ubuntu 20.04.5) | gcc 9.4.0 | OpenMPI 4.0.3 | 3.18.2 | NA |
+| macOS 12.6.3 | gcc 9.5.0 | OpenMPI 4.1.4 | 3.18.5 | NA |
+| macOS 12.6.6 | gcc 13.1.0 | OpenMPI 4.1.5 | 3.19.1 | Homebrew |
+| Ubuntu 22.04 | gcc 9.5.0 | OpenMPI 4.1.4 | 3.18.5 | NA |
+| Ubuntu 22.04 ARM64 | gcc 11.4.0 | OpenMPI 4.1.5 | 3.19.3 | apt |
+| Ubuntu 22.04 ARM64 | gcc 9.5.0 | MPICH 3.4.1 | 3.15.5 | NA |
+| Ubuntu 22.04 ARM64 | gcc 12.3.0 | MPICH 4.1.1 | 3.19.6 | NA |
+| Ubuntu 22.04 ARM64 | gcc 12.3.0 | MPICH 4.1.1 | 3.20.0 | NA |
+| SUSE Linux Enterprise Server 15 SP2 | intel 19.1.0.166 20191121 | CRAY-MPICH 7.7.19 | CRAY-PETSC 3.14.5.0 | NA |
+| Red Hat Enterprise Linux 8.7 | intel 2021.10.0 20230609 | CRAY-MPICH 8.1.26 | 3.15.5 | NA |
The most up-to-date configurations are available in the GitHub CI script: `.github/workflows/ci.yml` under the task `parallel_test`. These are being tested upon every change to the `develop` branch of MODFLOW.
@@ -194,4 +204,46 @@ To improve support, we kindly ask you to share your experience with building and
## Known issues
-tbd
\ No newline at end of file
+### Building PETSc on Ubuntu 22.04 with MPICH and GNU compilers
+
+Versions of PETSc that use MPICH 3.4 (v3.14, v3.15, v3.16) must be built with gcc-9 or earlier. Versions of PETSc that use MPICH 4.1 (v3.17 or newer) can be built with newer versions of the gcc compiler (gcc-11, gcc-12, etc.).
+
+Meson does not correctly load the Fortran compiler flags from the `mpich.pc` package configuration file in the `$PETSC_DIR/$PETSC_ARCH/lib/pkgconfig` directory. To overcome this issue, make a copy of `mpich.pc` and name it `mpichfort.pc`. Then determine the appropriate Fortran flags using
+
+```
+$PETSC_DIR/$PETSC_ARCH/bin/mpifort -show
+```
+
+which will return something like
+
+```
+$ linux-real-gcc12.3.0-3.20.0/bin/mpifort -show
+gfortran -fPIC -ffree-line-length-none -ffree-line-length-0 -Wno-lto-type-mismatch -O2 -fallow-argument-mismatch -I/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/include -I/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/include -L/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib -lmpifort -Wl,-rpath -Wl,/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib -Wl,--enable-new-dtags -lmpi
+```
+
+Copy the returned Fortran flags and replace the `Libs:` and `Cflags:` attributes in the `mpichfort.pc` file. Also modify the `Name:` attribute to `mpichfort`. The modified `mpichfort.pc` file should look something like
+
+```
+# this gives access to the mpich header files
+prefix=/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0
+exec_prefix=${prefix}
+libdir=/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib
+includedir=${prefix}/include
+
+Name: mpichfort
+Description: High Performance and portable MPI
+Version: 4.1.2
+URL: http://www.mcs.anl.gov/research/projects/mpich
+Requires:
+Libs: -fPIC -ffree-line-length-none -ffree-line-length-0 -Wno-lto-type-mismatch -O2 -fallow-argument-mismatch -I/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/include -I/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/include -L/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib -lmpifort -Wl,-rpath -Wl,/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib -Wl,--enable-new-dtags -lmpi
+Cflags: -fPIC -ffree-line-length-none -ffree-line-length-0 -Wno-lto-type-mismatch -O2 -fallow-argument-mismatch -I/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/include -I/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/include -L/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib -lmpifort -Wl,-rpath -Wl,/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib -Wl,--enable-new-dtags -lmpi
+
+# pkg-config does not understand Cxxflags, etc. So we allow users to
+# query them using the --variable option
+
+cxxflags= -Wno-lto-type-mismatch -Wno-psabi -O2 -std=gnu++17 -fPIC -I${includedir}
+fflags=-fPIC -ffree-line-length-none -ffree-line-length-0 -Wno-lto-type-mismatch -O2 -I${includedir}
+fcflags=-fPIC -ffree-line-length-none -ffree-line-length-0 -Wno-lto-type-mismatch -O2 -I${includedir}
+```
+
+The `/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/include` and `/media/psf/Development/petsc/linux-real-gcc12.3.0-3.20.0/lib` entries in the `Libs:` and `Cflags:` attributes can be replaced with `${includedir}` and `${libdir}`, respectively, to simplify `mpichfort.pc`.
\ No newline at end of file
diff --git a/README.md b/README.md
index 306731b232e..5e561c379b6 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
This is the development repository for the USGS MODFLOW 6 Hydrologic Model. The official USGS distribution is available at [USGS Release Page](https://water.usgs.gov/ogw/modflow/MODFLOW.html).
-### Version 6.4.2
+### Version 6.4.3
[![GitHub release](https://img.shields.io/github/release/MODFLOW-USGS/modflow6.svg)](https://github.com/MODFLOW-USGS/modflow6/releases/latest)
[![MODFLOW 6 continuous integration](https://github.com/MODFLOW-USGS/modflow6/actions/workflows/ci.yml/badge.svg)](https://github.com/MODFLOW-USGS/modflow6/actions/workflows/ci.yml)
@@ -94,6 +94,8 @@ The GWT model for MODFLOW 6 simulates three-dimensional transport of a single so
[Hughes, J.D., Leake, S.A., Galloway, D.L., and White, J.T., 2022, Documentation for the Skeletal Storage, Compaction, and Subsidence (CSUB) Package of MODFLOW 6: U.S. Geological Survey Techniques and Methods, book 6, chap. A62, 57 p., https://doi.org/10.3133/tm6A62](https://doi.org/10.3133/tm6A62)
+[Langevin, C.D., Hughes, J.D., Provost, A.M., Russcher, M.J. and Panday, S., 2023, MODFLOW as a Configurable Multi-Model Hydrologic Simulator: Groundwater, https://doi.org/10.1111/gwat.13351](https://doi.org/10.1111/gwat.13351)
+
#### ***Software/Code***
The following is the general citation for the MODFLOW 6 software.
diff --git a/autotest/TestArrayHandlers.f90 b/autotest/TestArrayHandlers.f90
new file mode 100644
index 00000000000..321beb7b16c
--- /dev/null
+++ b/autotest/TestArrayHandlers.f90
@@ -0,0 +1,353 @@
+module TestArrayHandlers
+ use KindModule, only: I4B, DP, LGP
+ use testdrive, only: error_type, unittest_type, new_unittest, check, &
+ test_failed, to_string
+ use ArrayHandlersModule, only: ExpandArray, ExpandArray2D, ExtendPtrArray, &
+ remove_character
+ use ConstantsModule, only: LINELENGTH
+ implicit none
+ private
+ public :: collect_arrayhandlers
+
+contains
+
+ subroutine collect_arrayhandlers(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ new_unittest("ExpandArray_int", &
+ test_ExpandArray_int), &
+ new_unittest("ExpandArray_dbl", &
+ test_ExpandArray_dbl), &
+ new_unittest("ExpandArray_lgp", &
+ test_ExpandArray_lgp), &
+ new_unittest("ExpandArray2D_int", &
+ test_ExpandArray2D_int), &
+ new_unittest("ExpandArray2D_dbl", &
+ test_ExpandArray2D_dbl), &
+ ! new_unittest("ExtendPtrArray_int", &
+ ! test_ExtendPtrArray_int), &
+ ! new_unittest("ExtendPtrArray_dbl", &
+ ! test_ExtendPtrArray_dbl), &
+ new_unittest("remove_character", &
+ test_remove_character) &
+ ]
+ end subroutine collect_arrayhandlers
+
+ !> @brief Test 1D int array expansion
+ subroutine test_ExpandArray_int(error)
+ type(error_type), allocatable, intent(out) :: error
+ integer(I4B), allocatable :: a(:)
+ integer(I4B) :: i, lb, n1, n2
+
+ n1 = 2 ! starting size
+ n2 = 5 ! expanded size
+ do lb = -1, 1 ! test default lower bound (1) as well as 0 and -1
+ ! allocate/populate array
+ allocate (a(lb:(lb + n1 - 1)))
+ a(lb) = lb
+ a(lb + 1) = lb + 1
+
+ ! resize array and check new size and bounds
+ call ExpandArray(a, n2 - n1)
+ call check(error, size(a, 1) == n2, &
+ "unexpected size: "//to_string(size(a, 1)))
+ call check(error, lbound(a, 1) == lb, &
+ "unexpected lower bound: "//to_string(lbound(a, 1)))
+ call check(error, ubound(a, 1) == lb + n2 - 1, &
+ "unexpected upper bound: "//to_string(ubound(a, 1)))
+ if (allocated(error)) return
+
+ ! set new array elements and check new/old contents
+ do i = lb + n1 - 1, lb + n2 - 1
+ a(i) = i
+ end do
+ do i = lb, lb + n2 - 1
+ call check(error, a(i) == i, &
+ "unexpected value "//to_string(a(i)) &
+ //" at i="//to_string(i))
+ if (allocated(error)) return
+ end do
+ deallocate (a)
+ end do
+ end subroutine test_ExpandArray_int
+
+ !> @brief Test 1D dbl array expansion
+ subroutine test_ExpandArray_dbl(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), allocatable :: a(:)
+ integer(I4B) :: i, lb, n1, n2
+
+ n1 = 2 ! starting size
+ n2 = 5 ! expanded size
+ do lb = -1, 1 ! test with default lower bound (1) as well as 0 and -1
+ ! allocate/populate array
+ allocate (a(lb:(lb + n1 - 1)))
+ a(lb) = real(lb)
+ a(lb + 1) = real(lb + 1)
+
+ ! resize array and check new size and bounds
+ call ExpandArray(a, n2 - n1)
+ call check(error, size(a, 1) == n2, &
+ "unexpected size: "//to_string(size(a, 1)))
+ call check(error, lbound(a, 1) == lb, &
+ "unexpected lower bound: "//to_string(lbound(a, 1)))
+ call check(error, ubound(a, 1) == lb + n2 - 1, &
+ "unexpected upper bound: "//to_string(ubound(a, 1)))
+ if (allocated(error)) return
+
+ ! set new array elements and check new/old contents
+ do i = lb + n1 - 1, lb + n2 - 1
+ a(i) = real(i)
+ end do
+ do i = lb, lb + n2 - 1
+ call check(error, a(i) == real(i), &
+ "unexpected value "//to_string(a(i)) &
+ //" at i="//to_string(i))
+ if (allocated(error)) return
+ end do
+ deallocate (a)
+ end do
+ end subroutine test_ExpandArray_dbl
+
+ !> @brief Test 1D logical array expansion
+ subroutine test_ExpandArray_lgp(error)
+ type(error_type), allocatable, intent(out) :: error
+ logical(LGP), allocatable :: a(:)
+ integer(I4B) :: i, lb, n1, n2
+
+ n1 = 2 ! starting size
+ n2 = 5 ! expanded size
+ do lb = -1, 1 ! test with default lower bound (1) as well as 0 and -1
+ ! allocate/populate array (alternate T/F starting with false)
+ allocate (a(lb:(lb + n1 - 1)))
+ a(lb) = mod(lb, 2) == 0
+ a(lb + 1) = mod(lb + 1, 2) == 0
+
+ ! resize array and check new size and bounds
+ call ExpandArray(a, n2 - n1)
+ call check(error, size(a, 1) == n2, &
+ "unexpected size: "//to_string(size(a, 1)))
+ call check(error, lbound(a, 1) == lb, &
+ "unexpected lower bound: "//to_string(lbound(a, 1)))
+ call check(error, ubound(a, 1) == lb + n2 - 1, &
+ "unexpected upper bound: "//to_string(ubound(a, 1)))
+ if (allocated(error)) return
+
+ ! set new array elements and check new/old contents
+ do i = lb + n1 - 1, lb + n2 - 1
+ a(i) = mod(i, 2) == 0
+ end do
+ do i = lb, lb + n2 - 1
+ call check(error, a(i) .eqv. (mod(i, 2) == 0), &
+ "unexpected value "// &
+ merge('t', 'f', a(i)) &
+ //" at i="//to_string(i))
+ if (allocated(error)) return
+ end do
+ deallocate (a)
+ end do
+ end subroutine test_ExpandArray_lgp
+
+ !> @brief Test 2D int array expansion
+ subroutine test_ExpandArray2D_int(error)
+ type(error_type), allocatable, intent(out) :: error
+ integer(I4B), allocatable :: a(:, :)
+ integer(I4B) :: i, lb, n1, n2
+
+ n1 = 2 ! starting size
+ n2 = 5 ! expanded size
+ do lb = -1, 1 ! test with default lower bound (1) as well as 0 and -1
+ ! allocate/populate array and check initial size, with
+ ! same lower bound and starting/new size for both dims
+ allocate (a(lb:(lb + n1 - 1), lb:(lb + n1 - 1)))
+ a(lb, :) = lb
+ a(lb + 1, :) = lb + 1
+ call check(error, size(a, 1) == n1 .and. size(a, 2) == n1)
+ if (allocated(error)) return
+
+ ! resize array and check new size and bounds
+ call ExpandArray2D(a, n2 - n1, n2 - n1)
+ call check(error, size(a, 1) == n2, &
+ "unexpected dim1 size: "//to_string(size(a, 1)))
+      call check(error, size(a, 2) == n2, &
+                 "unexpected dim2 size: "//to_string(size(a, 2)))
+ call check(error, lbound(a, 1) == lb, &
+ "unexpected dim1 lower bound:"//to_string(lbound(a, 1)))
+ call check(error, ubound(a, 1) == lb + n2 - 1, &
+ "unexpected dim1 upper bound:"//to_string(ubound(a, 1)))
+ call check(error, lbound(a, 2) == lb, &
+ "unexpected dim2 lower bound:"//to_string(lbound(a, 2)))
+ call check(error, ubound(a, 2) == lb + n2 - 1, &
+ "unexpected dim2 upper bound:"//to_string(ubound(a, 2)))
+ if (allocated(error)) return
+
+ ! set new elements starting from the new region, check new/old contents
+ do i = lb + n1 - 1, lb + n2 - 1
+ a(i, :) = i
+ end do
+ do i = lb, lb + n2 - 1
+ if (i < (lb + n1 - 1)) then
+ ! old contents, expect uninitialized values in new slots
+ call check(error, all(a(i, lb:(lb + n1 - 1)) == i), &
+ "unexpected value "//to_string(a(i, i)) &
+ //" at i="//to_string(i))
+ else
+ ! new contents, expect all values as set in prior loop
+ call check(error, all(a(i, :) == i), &
+ "unexpected value "//to_string(a(i, i)) &
+ //" at i="//to_string(i))
+ end if
+ if (allocated(error)) return
+ end do
+ deallocate (a)
+ end do
+ end subroutine test_ExpandArray2D_int
+
+ !> @brief Test 2D dbl array expansion
+ subroutine test_ExpandArray2D_dbl(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), allocatable :: a(:, :)
+ integer(I4B) :: i, lb, n1, n2
+
+ n1 = 2 ! starting size
+ n2 = 5 ! expanded size
+ do lb = -1, 1 ! test with default lower bound (1) as well as 0 and -1
+ ! allocate/populate array and check initial size, with
+ ! same lower bound and starting/new size for both dims
+ allocate (a(lb:(lb + n1 - 1), lb:(lb + n1 - 1)))
+ a(lb, :) = real(lb)
+ a(lb + 1, :) = real(lb + 1)
+ call check(error, size(a, 1) == n1 .and. size(a, 2) == n1)
+ if (allocated(error)) return
+
+ ! resize array and check new size and bounds
+ call ExpandArray2D(a, n2 - n1, n2 - n1)
+ call check(error, size(a, 1) == n2, &
+ "unexpected dim1 size: "//to_string(size(a, 1)))
+      call check(error, size(a, 2) == n2, &
+                 "unexpected dim2 size: "//to_string(size(a, 2)))
+ call check(error, lbound(a, 1) == lb, &
+ "unexpected dim1 lower bound:"//to_string(lbound(a, 1)))
+ call check(error, ubound(a, 1) == lb + n2 - 1, &
+ "unexpected dim1 upper bound:"//to_string(ubound(a, 1)))
+ call check(error, lbound(a, 2) == lb, &
+ "unexpected dim2 lower bound:"//to_string(lbound(a, 2)))
+ call check(error, ubound(a, 2) == lb + n2 - 1, &
+ "unexpected dim2 upper bound:"//to_string(ubound(a, 2)))
+ if (allocated(error)) return
+
+ ! set new elements starting from the new region, check new/old contents
+ do i = lb + n1 - 1, lb + n2 - 1
+ a(i, :) = real(i)
+ end do
+ do i = lb, lb + n2 - 1
+ if (i < (lb + n1 - 1)) then
+ ! old contents, expect uninitialized values in new slots
+ call check(error, all(a(i, lb:(lb + n1 - 1)) == real(i)), &
+ "unexpected value "//to_string(a(i, i)) &
+ //" at i="//to_string(i))
+ else
+ ! new contents, expect all values as set in prior loop
+ call check(error, all(a(i, :) == real(i)), &
+ "unexpected value "//to_string(a(i, i)) &
+ //" at i="//to_string(i))
+ end if
+ if (allocated(error)) return
+ end do
+ deallocate (a)
+ end do
+ end subroutine test_ExpandArray2D_dbl
+
+ !> @brief Test 1D int ptr array expansion
+ subroutine test_ExtendPtrArray_int(error)
+ type(error_type), allocatable, intent(out) :: error
+ integer(I4B), allocatable, target :: aa(:)
+ integer(I4B), pointer, contiguous :: a(:)
+ integer(I4B) :: i, lb, n1, n2
+
+ n1 = 2 ! starting size
+ n2 = 5 ! expanded size
+ do lb = -1, 1 ! test with default lower bound (1) as well as 0 and -1
+ ! allocate/populate array and set pointer
+ allocate (aa(lb:(lb + n1 - 1)))
+ aa(lb) = lb
+ aa(lb + 1) = lb + 1
+ a => aa
+
+ ! resize array and check new size and bounds
+ call ExtendPtrArray(a, n2 - n1)
+ call check(error, size(a, 1) == n2, &
+ "unexpected size: "//to_string(size(a, 1)))
+ call check(error, lbound(a, 1) == lb, &
+ "unexpected lower bound: "//to_string(lbound(a, 1)))
+ call check(error, ubound(a, 1) == lb + n2 - 1, &
+ "unexpected upper bound: "//to_string(ubound(a, 1)))
+ if (allocated(error)) return
+
+ ! set new array elements and check new/old contents
+ do i = lb + n1 - 1, lb + n2 - 1
+ a(i) = i
+ end do
+ do i = lb, lb + n2 - 1
+ call check(error, a(i) == i, &
+ "unexpected value "//to_string(a(i)) &
+ //" at i="//to_string(i))
+ if (allocated(error)) return
+ end do
+ nullify (a)
+ deallocate (aa)
+ end do
+ end subroutine test_ExtendPtrArray_int
+
+ !> @brief Test 1D dbl ptr array expansion
+ subroutine test_ExtendPtrArray_dbl(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), allocatable, target :: aa(:)
+ real(DP), pointer, contiguous :: a(:)
+ integer(I4B) :: i, lb, n1, n2
+
+ n1 = 2 ! starting size
+ n2 = 5 ! expanded size
+ do lb = -1, 1 ! test with default lower bound (1) as well as 0 and -1
+ ! allocate/populate array and set pointer
+ allocate (aa(lb:(lb + n1 - 1)))
+ aa(lb) = real(lb)
+ aa(lb + 1) = real(lb + 1)
+ a => aa
+
+ ! resize array and check new size and bounds
+ call ExtendPtrArray(a, n2 - n1)
+ call check(error, size(a, 1) == n2, &
+ "unexpected size: "//to_string(size(a, 1)))
+ call check(error, lbound(a, 1) == lb, &
+ "unexpected lower bound: "//to_string(lbound(a, 1)))
+ call check(error, ubound(a, 1) == lb + n2 - 1, &
+ "unexpected upper bound: "//to_string(ubound(a, 1)))
+ if (allocated(error)) return
+
+ ! set new array elements and check new/old contents
+      do i = lb + n1 - 1, lb + n2 - 1
+ a(i) = real(i)
+ end do
+ do i = lb, lb + n2 - 1
+ call check(error, a(i) == real(i), &
+ "unexpected value "//to_string(a(i)) &
+ //" at i="//to_string(i))
+ if (allocated(error)) return
+ end do
+ nullify (a)
+ deallocate (aa)
+ end do
+ end subroutine test_ExtendPtrArray_dbl
+
+ subroutine test_remove_character(error)
+ type(error_type), allocatable, intent(out) :: error
+ character(len=11), allocatable :: s(:)
+ allocate (s(2))
+ s(1) = "hello world"
+ s(2) = "hello earth"
+ call remove_character(s, 1)
+ call check(error, s(1) == "hello earth")
+ end subroutine test_remove_character
+
+end module TestArrayHandlers
diff --git a/autotest/TestDevFeature.f90 b/autotest/TestDevFeature.f90
new file mode 100644
index 00000000000..243c2e9e233
--- /dev/null
+++ b/autotest/TestDevFeature.f90
@@ -0,0 +1,28 @@
+module TestDevFeature
+ use testdrive, only: error_type, unittest_type, new_unittest, check
+ use DevFeatureModule, only: dev_feature
+ use ConstantsModule, only: LINELENGTH
+ use VersionModule, only: IDEVELOPMODE
+
+ implicit none
+ private
+ public :: collect_dev_feature
+
+contains
+
+ subroutine collect_dev_feature(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ ! expect failure if in release mode, otherwise pass
+ new_unittest("dev_feature", test_dev_feature, &
+ should_fail=(IDEVELOPMODE == 0)) &
+ ]
+ end subroutine collect_dev_feature
+
+ subroutine test_dev_feature(error)
+ type(error_type), allocatable, intent(out) :: error
+ character(len=LINELENGTH) :: errmsg
+ call dev_feature(errmsg)
+ end subroutine test_dev_feature
+
+end module TestDevFeature
diff --git a/autotest/TestGeomUtil.f90 b/autotest/TestGeomUtil.f90
new file mode 100644
index 00000000000..3447f8eb6e9
--- /dev/null
+++ b/autotest/TestGeomUtil.f90
@@ -0,0 +1,323 @@
+module TestGeomUtil
+ use KindModule, only: I4B, DP
+ use testdrive, only: check, error_type, new_unittest, test_failed, &
+ to_string, unittest_type
+ use GeomUtilModule, only: get_node, get_ijk, get_jk, point_in_polygon, &
+ skew
+ use ConstantsModule, only: LINELENGTH
+ implicit none
+ private
+ public :: collect_geomutil
+
+contains
+
+ subroutine collect_geomutil(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ new_unittest("get_node_get_ijk", &
+ test_get_node_get_ijk), &
+ new_unittest("point_in_polygon_sq", &
+ test_point_in_polygon_sq), &
+ new_unittest("point_in_polygon_tri", &
+ test_point_in_polygon_tri), &
+ new_unittest("point_in_polygon_irr", &
+ test_point_in_polygon_irr), &
+ new_unittest("skew", test_skew) &
+ ]
+ end subroutine collect_geomutil
+
+ ! 2D arrays for polygons and check points use column-major indexing
+
+ subroutine test_get_node_get_ijk(error)
+ type(error_type), allocatable, intent(out) :: error
+ integer :: ilay
+ integer :: irow
+ integer :: icol
+ integer :: nlay
+ integer :: nrow
+ integer :: ncol
+ integer :: nnum
+ integer :: ncls
+ integer :: k, i, j
+
+ ! trivial grid with 1 cell
+ nnum = get_node(1, 1, 1, 1, 1, 1)
+ call get_ijk(nnum, 1, 1, 1, ilay, irow, icol)
+ call check(error, nnum == 1)
+ call check(error, ilay == 1)
+ call check(error, irow == 1)
+ call check(error, icol == 1)
+ if (allocated(error)) return
+
+ ! small grid, 3x4x5
+ nlay = 3
+ nrow = 4
+ ncol = 5
+ ncls = nlay * nrow * ncol
+ do k = 1, nlay
+ do i = 1, nrow
+ do j = 1, ncol
+ ! node number from ijk
+ nnum = get_node(k, i, j, nlay, nrow, ncol)
+ call check(error, nnum == (k - 1) * nrow * ncol + (i - 1) * ncol + j)
+ if (allocated(error)) return
+
+ ! ijk from node number
+ call get_ijk(nnum, nrow, ncol, nlay, irow, icol, ilay)
+ call check(error, ilay == k)
+ call check(error, irow == i)
+ call check(error, icol == j)
+ if (allocated(error)) return
+ end do
+ end do
+ end do
+ end subroutine test_get_node_get_ijk
+
+ subroutine test_point_in_polygon(error, shape, &
+ poly, in_pts, out_pts, vert_pts, face_pts)
+ type(error_type), allocatable, intent(inout) :: error
+ character(len=*), intent(in) :: shape
+ real(DP), allocatable, intent(in) :: poly(:, :)
+ real(DP), allocatable, intent(in) :: in_pts(:, :)
+ real(DP), allocatable, intent(in) :: out_pts(:, :)
+ real(DP), allocatable, intent(in) :: vert_pts(:, :)
+ real(DP), allocatable, intent(in) :: face_pts(:, :)
+ integer(I4B) :: i
+ real(DP) :: x, y
+
+ ! test inside points
+ do i = 1, size(in_pts, 2)
+ x = in_pts(1, i)
+ y = in_pts(2, i)
+ call check(error, point_in_polygon(x, y, poly), &
+ "point inside "//shape//" failed: " &
+ //to_string(x)//", "//to_string(y))
+ if (allocated(error)) return
+ end do
+
+ ! test outside points
+ do i = 1, size(out_pts, 2)
+ x = out_pts(1, i)
+ y = out_pts(2, i)
+ call check(error, (.not. point_in_polygon(x, y, poly)), &
+ "point outside "//shape//" failed: " &
+ //to_string(x)//", "//to_string(y))
+ if (allocated(error)) return
+ end do
+
+ ! test vertex points
+ do i = 1, size(vert_pts, 2)
+ x = vert_pts(1, i)
+ y = vert_pts(2, i)
+ call check(error, point_in_polygon(x, y, poly), &
+ "point on "//shape//" vertex failed: " &
+ //to_string(x)//", "//to_string(y))
+ if (allocated(error)) return
+ end do
+
+ ! test face points
+ do i = 1, size(face_pts, 2)
+ x = face_pts(1, i)
+ y = face_pts(2, i)
+ call check(error, point_in_polygon(x, y, poly), &
+ "point on "//shape//" face failed: " &
+ //to_string(x)//", "//to_string(y))
+ if (allocated(error)) return
+ end do
+ end subroutine test_point_in_polygon
+
+ !> @brief Test a unit square
+ subroutine test_point_in_polygon_sq(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), allocatable :: poly(:, :)
+ real(DP), allocatable :: in_pts(:, :)
+ real(DP), allocatable :: out_pts(:, :)
+ real(DP), allocatable :: vert_pts(:, :)
+ real(DP), allocatable :: face_pts(:, :)
+
+ allocate (poly(2, 4))
+
+ allocate (in_pts(2, 3))
+ in_pts(:, 1) = (/0.99_DP, 0.01_DP/)
+ in_pts(:, 2) = (/0.5_DP, 0.5_DP/)
+ in_pts(:, 3) = (/0.0001_DP, 0.9999_DP/)
+
+ allocate (out_pts(2, 2))
+ out_pts(:, 1) = (/0.5_DP, 1.00001_DP/)
+ out_pts(:, 2) = (/-0.5_DP, 34.0_DP/)
+
+ allocate (vert_pts(2, 4))
+ vert_pts(:, 1) = (/0.0_DP, 0.0_DP/)
+ vert_pts(:, 2) = (/1.0_DP, 0.0_DP/)
+ vert_pts(:, 3) = (/0.0_DP, 1.0_DP/)
+ vert_pts(:, 4) = (/1.0_DP, 1.0_DP/)
+
+ allocate (face_pts(2, 4))
+ face_pts(:, 1) = (/0.0_DP, 0.5_DP/)
+ face_pts(:, 2) = (/0.5_DP, 0.0_DP/)
+ face_pts(:, 3) = (/1.0_DP, 0.5_DP/)
+ face_pts(:, 4) = (/0.5_DP, 1.0_DP/)
+
+ poly(:, 1) = (/0.0_DP, 0.0_DP/)
+ poly(:, 2) = (/0.0_DP, 1.0_DP/)
+ poly(:, 3) = (/1.0_DP, 1.0_DP/)
+ poly(:, 4) = (/1.0_DP, 0.0_DP/)
+ call test_point_in_polygon(error, "clockwise square", &
+ poly, in_pts, out_pts, vert_pts, face_pts)
+ if (allocated(error)) return
+
+ poly(:, 1) = (/0.0_DP, 0.0_DP/)
+ poly(:, 2) = (/1.0_DP, 0.0_DP/)
+ poly(:, 3) = (/1.0_DP, 1.0_DP/)
+ poly(:, 4) = (/0.0_DP, 1.0_DP/)
+ call test_point_in_polygon(error, "counter-clockwise square", &
+ poly, in_pts, out_pts, vert_pts, face_pts)
+ if (allocated(error)) return
+
+ deallocate (poly)
+ deallocate (in_pts)
+ deallocate (out_pts)
+ deallocate (vert_pts)
+ deallocate (face_pts)
+ end subroutine test_point_in_polygon_sq
+
+ !> @brief Test a right triangle
+ subroutine test_point_in_polygon_tri(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), allocatable :: poly(:, :)
+ real(DP), allocatable :: in_pts(:, :)
+ real(DP), allocatable :: out_pts(:, :)
+ real(DP), allocatable :: vert_pts(:, :)
+ real(DP), allocatable :: face_pts(:, :)
+
+ allocate (poly(2, 3))
+
+ allocate (in_pts(2, 3))
+ in_pts(:, 1) = (/0.8_DP, 0.0001_DP/)
+ in_pts(:, 2) = (/0.5_DP, 0.49999_DP/)
+ in_pts(:, 3) = (/0.0001_DP, 0.8_DP/)
+
+ allocate (out_pts(2, 2))
+ out_pts(:, 1) = (/0.5_DP, 0.50001_DP/)
+ out_pts(:, 2) = (/-0.5_DP, 34.0_DP/)
+
+ allocate (vert_pts(2, 3))
+ vert_pts(:, 1) = (/0.0_DP, 0.0_DP/)
+ vert_pts(:, 2) = (/1.0_DP, 0.0_DP/)
+ vert_pts(:, 3) = (/0.0_DP, 1.0_DP/)
+
+ allocate (face_pts(2, 3))
+ face_pts(:, 1) = (/0.0_DP, 0.5_DP/)
+ face_pts(:, 2) = (/0.5_DP, 0.0_DP/)
+ face_pts(:, 3) = (/0.5_DP, 0.5_DP/)
+
+ poly(:, 1) = (/0.0_DP, 0.0_DP/)
+ poly(:, 2) = (/0.0_DP, 1.0_DP/)
+ poly(:, 3) = (/1.0_DP, 0.0_DP/)
+ call test_point_in_polygon(error, "clockwise triangle", &
+ poly, in_pts, out_pts, vert_pts, face_pts)
+ if (allocated(error)) return
+
+ poly(:, 1) = (/0.0_DP, 0.0_DP/)
+ poly(:, 2) = (/1.0_DP, 0.0_DP/)
+ poly(:, 3) = (/0.0_DP, 1.0_DP/)
+ call test_point_in_polygon(error, "counter-clockwise triangle", &
+ poly, in_pts, out_pts, vert_pts, face_pts)
+ if (allocated(error)) return
+
+ deallocate (poly)
+ deallocate (in_pts)
+ deallocate (out_pts)
+ deallocate (vert_pts)
+ deallocate (face_pts)
+ end subroutine test_point_in_polygon_tri
+
+ !> @brief Test an irregular polygon
+ subroutine test_point_in_polygon_irr(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), allocatable :: poly(:, :)
+ real(DP), allocatable :: in_pts(:, :)
+ real(DP), allocatable :: out_pts(:, :)
+ real(DP), allocatable :: vert_pts(:, :)
+ real(DP), allocatable :: face_pts(:, :)
+
+ allocate (poly(2, 5))
+
+ allocate (in_pts(2, 3))
+ in_pts(:, 1) = (/0.5_DP, 0.1_DP/)
+ in_pts(:, 2) = (/0.5_DP, 0.49_DP/)
+ in_pts(:, 3) = (/1.999_DP, 1.999_DP/)
+
+ allocate (out_pts(2, 3))
+ out_pts(:, 1) = (/0.5_DP, -0.1_DP/)
+ out_pts(:, 2) = (/0.5_DP, 0.51_DP/)
+ out_pts(:, 3) = (/-0.5_DP, 34.0_DP/)
+
+ allocate (vert_pts(2, 5))
+ vert_pts(:, 1) = (/0.0_DP, 0.0_DP/)
+ vert_pts(:, 2) = (/1.0_DP, 1.0_DP/)
+ vert_pts(:, 3) = (/1.0_DP, 2.0_DP/)
+ vert_pts(:, 4) = (/2.0_DP, 2.0_DP/)
+ vert_pts(:, 5) = (/2.0_DP, 0.0_DP/)
+
+ allocate (face_pts(2, 3))
+ face_pts(:, 1) = (/0.5_DP, 0.5_DP/)
+ face_pts(:, 2) = (/2.0_DP, 1.0_DP/)
+ face_pts(:, 3) = (/1.5_DP, 2.0_DP/)
+
+ poly(:, 1) = (/0.0_DP, 0.0_DP/)
+ poly(:, 2) = (/1.0_DP, 1.0_DP/)
+ poly(:, 3) = (/1.0_DP, 2.0_DP/)
+ poly(:, 4) = (/2.0_DP, 2.0_DP/)
+ poly(:, 5) = (/2.0_DP, 0.0_DP/)
+ call test_point_in_polygon(error, &
+ "clockwise irregular polygon", &
+ poly, in_pts, out_pts, vert_pts, face_pts)
+ if (allocated(error)) return
+
+ poly(:, 1) = (/0.0_DP, 0.0_DP/)
+ poly(:, 2) = (/2.0_DP, 0.0_DP/)
+ poly(:, 3) = (/2.0_DP, 2.0_DP/)
+ poly(:, 4) = (/1.0_DP, 2.0_DP/)
+ poly(:, 5) = (/1.0_DP, 1.0_DP/)
+ call test_point_in_polygon(error, &
+ "counter-clockwise irregular polygon", &
+ poly, in_pts, out_pts, vert_pts, face_pts)
+ if (allocated(error)) return
+
+ deallocate (poly)
+ deallocate (in_pts)
+ deallocate (out_pts)
+ deallocate (vert_pts)
+ deallocate (face_pts)
+ end subroutine test_point_in_polygon_irr
+
+ subroutine test_skew(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP) :: v(2)
+
+ ! shear to right
+ v = (/1.0_DP, 1.0_DP/)
+ v = skew(v, (/1.0_DP, 1.0_DP, 1.0_DP/))
+ call check(error, v(1) == 2.0_DP .and. v(2) == 1.0_DP)
+ v = (/2.0_DP, 2.0_DP/)
+ v = skew(v, (/1.0_DP, 0.5_DP, 1.0_DP/))
+ call check(error, v(1) == 3.0_DP .and. v(2) == 2.0_DP)
+
+ ! collapse x dim
+ v = (/2.0_DP, 2.0_DP/)
+ v = skew(v, (/0.0_DP, 0.5_DP, 1.0_DP/))
+ call check(error, v(1) == 1.0_DP .and. v(2) == 2.0_DP, to_string(v(1)))
+
+ ! mirror over x axis
+ v = (/2.0_DP, 2.0_DP/)
+ v = skew(v, (/-1.0_DP, 0.0_DP, 1.0_DP/))
+ call check(error, v(1) == -2.0_DP .and. v(2) == 2.0_DP, to_string(v(1)))
+
+ ! mirror over x and y axis
+ v = (/2.0_DP, 2.0_DP/)
+ v = skew(v, (/-1.0_DP, 0.0_DP, -1.0_DP/))
+ call check(error, v(1) == -2.0_DP .and. v(2) == -2.0_DP, to_string(v(1)))
+ end subroutine test_skew
+
+end module TestGeomUtil
diff --git a/autotest/TestHashTable.f90 b/autotest/TestHashTable.f90
new file mode 100644
index 00000000000..5e20d853a9b
--- /dev/null
+++ b/autotest/TestHashTable.f90
@@ -0,0 +1,41 @@
+module TestHashTable
+ use KindModule, only: I4B, DP
+ use ConstantsModule, only: DNODATA, DZERO
+ use testdrive, only: check, error_type, new_unittest, test_failed, &
+ to_string, unittest_type
+ use HashTableModule, only: HashTableType, hash_table_cr, hash_table_da
+ implicit none
+ private
+ public :: collect_hashtable
+
+contains
+
+ subroutine collect_hashtable(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ new_unittest("add_and_get_value", &
+ test_add_and_get_value) &
+ ]
+ end subroutine collect_hashtable
+
+ subroutine test_add_and_get_value(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(HashTableType), pointer :: map
+ integer(I4B) :: i, n
+
+ allocate (map)
+ call hash_table_cr(map)
+
+ n = 3
+ do i = 1, n
+ call map%add(to_string(i), i)
+ end do
+
+ do i = 1, n
+ call check(error, map%get(to_string(i)) == i, &
+ 'wrong value for '//to_string(i))
+ end do
+
+ end subroutine test_add_and_get_value
+
+end module TestHashTable
diff --git a/autotest/TestInputOutput.f90 b/autotest/TestInputOutput.f90
new file mode 100644
index 00000000000..49ca3483321
--- /dev/null
+++ b/autotest/TestInputOutput.f90
@@ -0,0 +1,16 @@
+module TestInputOutput
+ use testdrive, only: error_type, unittest_type, new_unittest, check
+ use ConstantsModule, only: LINELENGTH
+ ! use InputOutputModule, only: ???
+ implicit none
+ private
+ public :: collect_inputoutput
+
+contains
+
+ subroutine collect_inputoutput(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ allocate (testsuite(0))
+ end subroutine collect_inputoutput
+
+end module TestInputOutput
diff --git a/autotest/TestList.f90 b/autotest/TestList.f90
new file mode 100644
index 00000000000..605460ec9d2
--- /dev/null
+++ b/autotest/TestList.f90
@@ -0,0 +1,254 @@
+module TestList
+ use KindModule, only: I4B
+ use testdrive, only: error_type, unittest_type, new_unittest, check
+ use ConstantsModule, only: LINELENGTH
+ use ListModule, only: ListType
+ implicit none
+ private
+ public :: collect_list
+
+ type :: IntNodeType
+ integer :: value
+ end type IntNodeType
+
+contains
+
+ subroutine collect_list(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ new_unittest("add_count_get_item", &
+ test_add_count_get_item), &
+ new_unittest("get_get_index_contains", &
+ test_get_index_contains), &
+ new_unittest("get_next_previous_item_reset", &
+ test_get_next_previous_item_reset), &
+ new_unittest("insert_after", &
+ test_insert_after), &
+ new_unittest("remove_node", &
+ test_remove_node) &
+ ]
+ end subroutine collect_list
+
+ subroutine test_add_count_get_item(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(ListType), pointer :: list
+ type(IntNodeType), pointer :: n
+ class(*), pointer :: p
+
+ allocate (list)
+ allocate (n)
+
+ ! empty
+ call check(error, list%Count() == 0, "count should be 0")
+ if (allocated(error)) return
+
+ ! add one node
+ n%value = 1
+ p => n
+ call list%Add(p)
+
+ ! check count
+ call check(error, list%Count() == 1, "count should be 1")
+ if (allocated(error)) return
+
+ ! retrieve item
+ p => list%GetItem(1)
+ call check(error, associated(p, n))
+ select type (item => p)
+ type is (IntNodeType)
+ call check(error, item%value == 1, "wrong value")
+ class default
+ call check(error, .false., "wrong node type")
+ end select
+ if (allocated(error)) return
+
+ deallocate (list)
+ deallocate (n)
+ end subroutine test_add_count_get_item
+
+ subroutine test_get_index_contains(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(ListType), pointer :: list
+ type(IntNodeType), pointer :: n1, n2
+ class(*), pointer :: p
+ integer(I4B) :: i
+
+ allocate (list)
+ allocate (n1)
+ allocate (n2)
+
+ ! add nodes
+ n1%value = 1
+ n2%value = 2
+ p => n1
+ call list%Add(p)
+ p => n2
+ call list%Add(p)
+
+ ! check count
+    call check(error, list%Count() == 2, "count should be 2")
+ if (allocated(error)) return
+
+ ! check get index
+ i = list%GetIndex(p)
+ call check(error, i == 2, "wrong index")
+ if (allocated(error)) return
+
+ ! check contains
+ p => n1
+ call check(error, list%ContainsObject(p), "should contain n1")
+ if (allocated(error)) return
+ p => n2
+ call check(error, list%ContainsObject(p), "should contain n2")
+ if (allocated(error)) return
+
+ deallocate (list)
+ deallocate (n1)
+ deallocate (n2)
+ end subroutine test_get_index_contains
+
+ subroutine test_get_next_previous_item_reset(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(ListType), pointer :: list
+ type(IntNodeType), pointer :: n1, n2, n3
+ class(*), pointer :: p
+ integer(I4B) :: i
+
+ allocate (list)
+ allocate (n1)
+ allocate (n2)
+ allocate (n3)
+
+ ! add nodes
+ n1%value = 1
+ n2%value = 2
+ n3%value = 3
+ p => n1
+ call list%Add(p)
+ p => n2
+ call list%Add(p)
+ p => n3
+ call list%Add(p)
+
+ ! check count
+ call check(error, list%Count() == 3, "count should be 3")
+ if (allocated(error)) return
+
+ ! check get next/previous item
+ p => list%GetNextItem()
+ call check(error, associated(p, n1))
+ p => list%GetNextItem()
+ call check(error, associated(p, n2))
+ p => list%GetPreviousItem()
+ call check(error, associated(p, n1))
+ p => list%GetNextItem()
+ call check(error, associated(p, n2))
+ p => list%GetNextItem()
+ call check(error, associated(p, n3))
+ p => list%GetNextItem()
+ call check(error, (.not. associated(p)))
+ call list%Reset()
+ p => list%GetPreviousItem()
+ call check(error, (.not. associated(p)))
+
+ deallocate (list)
+ deallocate (n1)
+ deallocate (n2)
+ deallocate (n3)
+ end subroutine test_get_next_previous_item_reset
+
+ subroutine test_insert_after(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(ListType), pointer :: list
+ type(IntNodeType), pointer :: n1, n2, n3
+ class(*), pointer :: p
+
+ allocate (list)
+ allocate (n1)
+ allocate (n2)
+ allocate (n3)
+
+ ! add nodes 1 and 3
+ n1%value = 1
+ n2%value = 2
+ n3%value = 3
+ p => n1
+ call list%Add(p)
+ p => n3
+ call list%Add(p)
+
+ ! check count
+ call check(error, list%Count() == 2, "count should be 2")
+ if (allocated(error)) return
+
+ ! insert item after first item
+ p => n2
+ call list%InsertAfter(p, 1)
+
+ ! check count
+ call check(error, list%Count() == 3, "count should be 3")
+ if (allocated(error)) return
+
+ ! check get next/previous item
+ call list%Reset()
+ p => list%GetNextItem()
+ call check(error, associated(p, n1))
+ p => list%GetNextItem()
+ call check(error, associated(p, n2))
+ p => list%GetNextItem()
+ call check(error, associated(p, n3))
+ if (allocated(error)) return
+
+ deallocate (list)
+ deallocate (n1)
+ deallocate (n2)
+ deallocate (n3)
+ end subroutine test_insert_after
+
+ subroutine test_remove_node(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(ListType), pointer :: list
+ type(IntNodeType), pointer :: n1, n2, n3
+ class(*), pointer :: p
+
+ allocate (list)
+ allocate (n1)
+ allocate (n2)
+ allocate (n3)
+
+ ! add nodes
+ n1%value = 1
+ n2%value = 2
+ n3%value = 3
+ p => n1
+ call list%Add(p)
+ p => n2
+ call list%Add(p)
+ p => n3
+ call list%Add(p)
+
+ ! check count
+ call check(error, list%Count() == 3, "count should be 3")
+ if (allocated(error)) return
+
+ ! remove first node
+ call list%RemoveNode(1, .false.)
+ call check(error, list%Count() == 2, "count should be 2")
+ p => list%GetItem(1)
+ call check(error, associated(p, n2))
+ p => list%GetItem(2)
+ call check(error, associated(p, n3))
+
+ ! remove last node
+ call list%RemoveNode(2, .false.)
+ call check(error, list%Count() == 1, "count should be 1")
+ p => list%GetItem(1)
+ call check(error, associated(p, n2))
+
+ deallocate (list)
+ deallocate (n1)
+ deallocate (n2)
+ deallocate (n3)
+ end subroutine test_remove_node
+
+end module TestList
diff --git a/autotest/TestMathUtil.f90 b/autotest/TestMathUtil.f90
new file mode 100644
index 00000000000..a07ad0af55c
--- /dev/null
+++ b/autotest/TestMathUtil.f90
@@ -0,0 +1,243 @@
+module TestMathUtil
+ use KindModule, only: I4B, DP
+ use ConstantsModule, only: DNODATA, DZERO
+ use testdrive, only: check, error_type, new_unittest, test_failed, &
+ to_string, unittest_type
+ use MathUtilModule, only: f1d, is_close, mod_offset, &
+ zeroch, zerotest, zeroin
+ implicit none
+ private
+ public :: collect_mathutil
+
+contains
+
+ subroutine collect_mathutil(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ new_unittest("is_close_symmetric", test_is_close_symmetric), &
+ new_unittest("is_close_symmetric_near_0", &
+ test_is_close_symmetric_near_0), &
+ new_unittest("mod_offset", &
+ test_mod_offset), &
+ new_unittest("zeroch", &
+ test_zeroch), &
+ new_unittest("zeroin", &
+ test_zeroin), &
+ new_unittest("zerotest", &
+ test_zerotest) &
+ ]
+ end subroutine collect_mathutil
+
+ subroutine test_mod_offset(error)
+ type(error_type), allocatable, intent(out) :: error
+
+ ! with no offset specified, should behave just like mod
+ call check(error, mod_offset(2, 2) == 0)
+ call check(error, mod_offset(2, 3) == 2)
+ call check(error, mod_offset(2.0_DP, 2.0_DP) == 0.0_DP)
+ call check(error, mod_offset(2.0_DP, 3.0_DP) == 2.0_DP)
+
+ ! with offset d specified, if the result x = a mod n falls
+ ! between 0 and n - 1, the new result x = a mod_d n falls
+ ! between d and d + n - 1.
+ call check(error, mod_offset(2, 3, -2) == -1)
+ call check(error, mod_offset(2, 3, -1) == -1)
+ call check(error, mod_offset(2, 3, 0) == 2)
+ call check(error, mod_offset(2, 3, 1) == 2)
+ call check(error, mod_offset(2, 3, 2) == 2)
+ call check(error, mod_offset(2, 3, 3) == 5)
+ call check(error, mod_offset(2, 3, 4) == 5)
+ call check(error, mod_offset(2.0_DP, 3.0_DP, -1.0_DP) == -1.0_DP)
+ call check(error, mod_offset(2.0_DP, 3.0_DP, 2.0_DP) == 2.0_DP)
+ call check(error, mod_offset(2.0_DP, 3.0_DP, 3.0_DP) == 5.0_DP)
+ end subroutine test_mod_offset
+
+ subroutine test_is_close_symmetric(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP) :: a, b, rtol
+
+ ! exact match
+ a = 1.0_DP
+ b = 1.0_DP
+ call check(error, is_close(a, b), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", eps=default")
+ if (allocated(error)) return
+
+ ! mismatch with default rtol
+ b = 1.0001_DP
+ call check(error, (.not. (is_close(a, b))), &
+ "exp ne: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", eps=default")
+ if (allocated(error)) return
+
+ ! inexact match with large rtol
+ rtol = 1d-2
+ call check(error, is_close(a, b, rtol=rtol), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", rtol="//to_string(rtol))
+ if (allocated(error)) return
+
+ ! mismatch when we reduce rtol
+ rtol = 0.5d-5
+ call check(error, (.not. is_close(a, b, rtol=rtol)), &
+ "exp ne: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", rtol="//to_string(rtol))
+ if (allocated(error)) return
+
+ ! +/-0
+ call check(error, is_close(0.0_DP, -0.0_DP), &
+ "exp ne: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", eps=default")
+
+ ! DNODATA
+ call check(error, (.not. is_close(0.0_DP, DNODATA)), &
+ "exp ne: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", eps=default")
+ call check(error, is_close(DNODATA, DNODATA), &
+ "exp ne: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", eps=default")
+ call check(error, (.not. is_close(DNODATA, DNODATA / 10)), &
+ "exp ne: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", eps=default")
+ call check(error, (.not. is_close(DNODATA, DNODATA * 10)), &
+ "exp ne: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", eps=default")
+
+ end subroutine test_is_close_symmetric
+
+ subroutine test_is_close_symmetric_near_0(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP) :: a, b, rtol, atol
+
+ a = 0.0_DP
+ b = 0.0_DP
+ call check(error, is_close(a, b), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", rtol=default")
+ if (allocated(error)) return
+
+ a = DZERO
+ b = DZERO
+ call check(error, is_close(a, b), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", rtol=default")
+ if (allocated(error)) return
+
+ b = 1d-4
+ call check(error, (.not. is_close(a, b)), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", rtol=default")
+ if (allocated(error)) return
+
+ rtol = 0.999_DP
+ call check(error, &
+ ! expect failure, see above
+ (.not. is_close(a, b, rtol=rtol)), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", rtol="//to_string(rtol))
+ if (allocated(error)) return
+
+ ! absolute comparison is appropriate when a and/or b are near or equal to 0
+ b = 1d-4
+ atol = 1d-3
+ call check(error, is_close(a, b, atol=atol), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", atol="//to_string(atol))
+ if (allocated(error)) return
+
+ ! make sure the absolute tolerance is applied
+ b = 1d-4
+ atol = 1d-5
+ call check(error, (.not. is_close(a, b, atol=atol)), &
+ "exp eq: a="//to_string(a)// &
+ ", b="//to_string(b)// &
+ ", atol="//to_string(atol))
+ if (allocated(error)) return
+
+ end subroutine test_is_close_symmetric_near_0
+
+ pure function sine(bet) result(s)
+ real(DP), intent(in) :: bet
+ real(DP) :: s
+ s = sin(bet)
+ end function sine
+
+ subroutine test_zeroch(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), parameter :: pi = 4 * atan(1.0_DP)
+ real(DP) :: z
+ procedure(f1d), pointer :: f
+
+ f => sine
+
+ z = zeroch(-1.0_DP, 1.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, 0.0_DP, atol=1d-6), &
+ 'expected 0, got: '//to_string(z))
+
+ z = zeroch(-4.0_DP, -1.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, -pi, atol=1d-6), &
+ 'expected -pi, got: '//to_string(z))
+
+ z = zeroch(1.0_DP, 4.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, pi, atol=1d-6), &
+ 'expected pi, got: '//to_string(z))
+ end subroutine test_zeroch
+
+ subroutine test_zeroin(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), parameter :: pi = 4 * atan(1.0_DP)
+ real(DP) :: z
+ procedure(f1d), pointer :: f
+
+ f => sine
+
+ z = zeroin(-1.0_DP, 1.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, 0.0_DP, atol=1d-6), &
+ 'expected 0, got: '//to_string(z))
+
+ z = zeroin(-4.0_DP, -1.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, -pi, atol=1d-6), &
+ 'expected -pi, got: '//to_string(z))
+
+ z = zeroin(1.0_DP, 4.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, pi, atol=1d-6), &
+ 'expected pi, got: '//to_string(z))
+ end subroutine test_zeroin
+
+ subroutine test_zerotest(error)
+ type(error_type), allocatable, intent(out) :: error
+ real(DP), parameter :: pi = 4 * atan(1.0_DP)
+ real(DP) :: z
+ procedure(f1d), pointer :: f
+
+ f => sine
+
+ z = zerotest(-1.0_DP, 1.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, 0.0_DP, atol=1d-6), &
+ 'expected 0, got: '//to_string(z))
+
+ z = zerotest(-4.0_DP, -1.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, -pi, atol=1d-6), &
+ 'expected -pi, got: '//to_string(z))
+
+ z = zerotest(1.0_DP, 4.0_DP, f, 0.001_DP)
+ call check(error, is_close(z, pi, atol=1d-6), &
+ 'expected pi, got: '//to_string(z))
+ end subroutine test_zerotest
+
+end module TestMathUtil
diff --git a/autotest/TestMessage.f90 b/autotest/TestMessage.f90
new file mode 100644
index 00000000000..b78d490df5b
--- /dev/null
+++ b/autotest/TestMessage.f90
@@ -0,0 +1,41 @@
+module TestMessage
+ use testdrive, only: error_type, unittest_type, new_unittest, check
+ use MessageModule, only: MessagesType
+ use ConstantsModule, only: LINELENGTH
+
+ implicit none
+ private
+ public :: collect_message
+
+contains
+
+ subroutine collect_message(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ new_unittest("init_and_count", test_init_and_count), &
+ new_unittest("store_count_and_write_all", &
+ test_store_count_and_write_all) &
+ ]
+ end subroutine collect_message
+
+ subroutine test_init_and_count(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(MessagesType) :: messages
+ messages = MessagesType()
+ call messages%init()
+ call check(error, messages%count() == 0)
+ end subroutine test_init_and_count
+
+ subroutine test_store_count_and_write_all(error)
+ type(error_type), allocatable, intent(out) :: error
+ type(MessagesType) :: messages
+ messages = MessagesType()
+ call messages%init()
+ call messages%store("1")
+ call messages%store("2")
+ call check(error, messages%count() == 2)
+ ! debug visually with e.g. `meson test --no-rebuild -C builddir --verbose Message`
+ call messages%write_all()
+ end subroutine test_store_count_and_write_all
+
+end module TestMessage
diff --git a/autotest/TestSim.f90 b/autotest/TestSim.f90
new file mode 100644
index 00000000000..b01b9ea93a8
--- /dev/null
+++ b/autotest/TestSim.f90
@@ -0,0 +1,58 @@
+module TestSim
+ use testdrive, only: error_type, unittest_type, new_unittest, check
+ use SimModule, only: store_error, store_warning, store_note, &
+ initial_message, count_errors, count_notes, &
+ count_warnings
+ use ConstantsModule, only: LINELENGTH
+
+ implicit none
+ private
+ public :: collect_sim
+
+contains
+
+ subroutine collect_sim(testsuite)
+ type(unittest_type), allocatable, intent(out) :: testsuite(:)
+ testsuite = [ &
+ new_unittest("store_and_count", test_store_and_count) &
+ ]
+ end subroutine collect_sim
+
+ subroutine test_store_and_count(error)
+ type(error_type), allocatable, intent(out) :: error
+ character(len=LINELENGTH) :: ntemsg
+ character(len=LINELENGTH) :: wrnmsg
+ character(len=LINELENGTH) :: errmsg
+
+ ! define messages
+ ntemsg = "NOTE"
+ wrnmsg = "WARNING"
+ errmsg = "ERROR"
+
+ ! initialize message arrays
+ call initial_message()
+
+ ! check no messages stored
+ call check(error, count_errors() == 0)
+ call check(error, count_warnings() == 0)
+ call check(error, count_notes() == 0)
+ if (allocated(error)) return
+
+    ! store a note and check that it's stored
+ call store_note(ntemsg)
+ call check(error, count_notes() == 1)
+ if (allocated(error)) return
+
+    ! store a warning and check that it's stored
+ call store_warning(wrnmsg)
+ call check(error, count_warnings() == 1)
+ if (allocated(error)) return
+
+ ! store an error and check that it's stored
+ call store_error(errmsg, terminate=.false.)
+ call check(error, count_errors() == 1)
+ if (allocated(error)) return
+
+ end subroutine test_store_and_count
+
+end module TestSim
diff --git a/autotest/build_exes.py b/autotest/build_exes.py
index ede1d7a67e7..bd7125bd478 100644
--- a/autotest/build_exes.py
+++ b/autotest/build_exes.py
@@ -2,9 +2,10 @@
from pathlib import Path
import pytest
-from conftest import project_root_path
from modflow_devtools.build import meson_build
+from conftest import project_root_path
+
repository = "MODFLOW-USGS/modflow6"
top_bin_path = project_root_path / "bin"
diff --git a/autotest/build_mfio_tex.py b/autotest/build_mfio_tex.py
index ab57f2da85d..aca75f8e242 100644
--- a/autotest/build_mfio_tex.py
+++ b/autotest/build_mfio_tex.py
@@ -49,7 +49,6 @@ def test_rebuild_from_dfn():
pth = "./"
with cwd(npth):
-
# get list of TeX files
files = [
f
diff --git a/autotest/common_regression.py b/autotest/common_regression.py
index a167d9235ee..a1307adc23f 100644
--- a/autotest/common_regression.py
+++ b/autotest/common_regression.py
@@ -1,8 +1,31 @@
import os
import shutil
-import sys
-
-ignore_ext = (
+from pathlib import Path
+from typing import Iterator, List, Optional, Tuple, Union
+from warnings import warn
+
+import numpy as np
+from flopy.utils import CellBudgetFile
+from flopy.utils.compare import compare_heads
+
+COMPARE_PROGRAMS = (
+ "mf2005",
+ "mfnwt",
+ "mfusg",
+ "mflgr",
+ "libmf6",
+ "mf6",
+ "mf6_regression"
+ # todo: "mp7"
+)
+EXTTEXT = {
+ "hds": "head",
+ "hed": "head",
+ "bhd": "head",
+ "ucn": "concentration",
+ "cbc": "cell-by-cell",
+}
+IGNORE_EXTENSIONS = (
".hds",
".hed",
".bud",
@@ -19,185 +42,64 @@
)
-def model_setup(namefile, dst, remove_existing=True, extrafiles=None):
- """Setup MODFLOW-based model files for autotests.
-
- Parameters
- ----------
- namefile : str
- MODFLOW-based model name file.
- dst : str
- destination path for comparison model or file(s)
- remove_existing : bool
- boolean indicating if an existing comparision model or file(s) should
- be replaced (default is True)
- extrafiles : str or list of str
- list of extra files to include in the comparision
-
- Returns
- -------
-
- """
- # Construct src pth from namefile or lgr file
- src = os.path.dirname(namefile)
-
- # Create the destination folder, if required
- create_dir = False
- if os.path.exists(dst):
- if remove_existing:
- print("Removing folder " + dst)
- shutil.rmtree(dst)
- create_dir = True
- else:
- create_dir = True
- if create_dir:
- os.mkdir(dst)
-
- # determine if a namefile is a lgr control file - get individual
- # name files out of the lgr control file
- namefiles = [namefile]
- ext = os.path.splitext(namefile)[1]
- if ".lgr" in ext.lower():
- lines = [line.rstrip("\n") for line in open(namefile)]
- for line in lines:
- if len(line) < 1:
- continue
- if line[0] == "#":
- continue
- t = line.split()
- if ".nam" in t[0].lower():
- fpth = os.path.join(src, t[0])
- namefiles.append(fpth)
-
- # Make list of files to copy
- files2copy = []
- for fpth in namefiles:
- files2copy.append(os.path.basename(fpth))
- ext = os.path.splitext(fpth)[1]
- # copy additional files contained in the name file and
- # associated package files
- if ext.lower() == ".nam":
- fname = os.path.abspath(fpth)
- files2copy = files2copy + get_input_files(fname)
-
- if extrafiles is not None:
- if isinstance(extrafiles, str):
- extrafiles = [extrafiles]
- for fl in extrafiles:
- files2copy.append(os.path.basename(fl))
-
- # Copy the files
- for f in files2copy:
- srcf = os.path.join(src, f)
- dstf = os.path.join(dst, f)
-
- # Check to see if dstf is going into a subfolder, and create that
- # subfolder if it doesn't exist
- sf = os.path.dirname(dstf)
- if not os.path.isdir(sf):
- os.makedirs(sf)
-
- # Now copy the file
- if os.path.exists(srcf):
- print("Copy file '" + srcf + "' -> '" + dstf + "'")
- shutil.copy(srcf, dstf)
- else:
- print(srcf + " does not exist")
-
- return
-
-
-def setup_comparison(namefile, dst, remove_existing=True):
- """Setup a comparison model or comparision file(s) for a MODFLOW-based
- model.
+def adjust_htol(
+ workspace: Union[str, os.PathLike], htol: float = 0.001
+) -> Optional[float]:
+    """Adjust the head comparison tolerance (htol) based on the
+    outer_dvclose value in the MODFLOW 6 ims file, if present."""
+
+ dvclose = get_dvclose(workspace)
+ if not dvclose:
+ return htol
+
+ # adjust htol if < IMS outer_dvclose
+ dvclose *= 5.0
+ return dvclose if (htol is None or htol < dvclose) else htol
+
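+# Illustrative sketch (the workspace path is hypothetical): the adjusted
+# tolerance is intended to be passed as the htol argument of a head
+# comparison such as flopy.utils.compare.compare_heads, e.g.
+#
+#   htol = adjust_htol("temp/test_gwf_example/mf6", htol=0.001)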
+
+def get_dvclose(workspace: Union[str, os.PathLike]) -> Optional[float]:
+ """Get outer_dvclose value from MODFLOW 6 ims file"""
+ dvclose = None
+ files = os.listdir(workspace)
+ for file_name in files:
+ pth = os.path.join(workspace, file_name)
+ if os.path.isfile(pth):
+ if file_name.lower().endswith(".ims"):
+ with open(pth) as f:
+ lines = f.read().splitlines()
+ for line in lines:
+ if "outer_dvclose" in line.lower():
+ v = float(line.split()[1])
+ if dvclose is None:
+ dvclose = v
+ else:
+ if v > dvclose:
+ dvclose = v
+ break
- Parameters
- ----------
- namefile : str
- MODFLOW-based model name file.
- dst : str
- destination path for comparison model or file(s)
- remove_existing : bool
- boolean indicating if an existing comparision model or file(s) should
- be replaced (default is True)
+ return dvclose
- Returns
- -------
+def get_rclose(workspace: Union[str, os.PathLike]) -> Optional[float]:
+ """Get inner_rclose value from MODFLOW 6 ims file"""
- """
- # Construct src pth from namefile
- src = os.path.dirname(namefile)
- action = None
- for root, dirs, files in os.walk(src):
- dl = [d.lower() for d in dirs]
- if any(".cmp" in s for s in dl):
- idx = None
- for jdx, d in enumerate(dl):
- if ".cmp" in d:
- idx = jdx
- break
- if idx is not None:
- if "mf2005.cmp" in dl[idx] or "mf2005" in dl[idx]:
- action = dirs[idx]
- elif "mfnwt.cmp" in dl[idx] or "mfnwt" in dl[idx]:
- action = dirs[idx]
- elif "mfusg.cmp" in dl[idx] or "mfusg" in dl[idx]:
- action = dirs[idx]
- elif "mf6.cmp" in dl[idx] or "mf6" in dl[idx]:
- action = dirs[idx]
- elif "libmf6.cmp" in dl[idx] or "libmf6" in dl[idx]:
- action = dirs[idx]
- else:
- action = dirs[idx]
- break
- if action is not None:
- dst = os.path.join(dst, f"{action}")
- if not os.path.isdir(dst):
- try:
- os.mkdir(dst)
- except:
- print("Could not make " + dst)
- # clean directory
- else:
- print(f"cleaning...{dst}")
- for root, dirs, files in os.walk(dst):
- for f in files:
- tpth = os.path.join(root, f)
- print(f" removing...{tpth}")
- os.remove(tpth)
- for d in dirs:
- tdir = os.path.join(root, d)
- print(f" removing...{tdir}")
- shutil.rmtree(tdir)
- # copy files
- cmppth = os.path.join(src, action)
- files = os.listdir(cmppth)
- files2copy = []
- if action.lower() == ".cmp":
- for file in files:
- if ".cmp" in os.path.splitext(file)[1].lower():
- files2copy.append(os.path.join(cmppth, file))
- for srcf in files2copy:
- f = os.path.basename(srcf)
- dstf = os.path.join(dst, f)
- # Now copy the file
- if os.path.exists(srcf):
- print("Copy file '" + srcf + "' -> '" + dstf + "'")
- shutil.copy(srcf, dstf)
- else:
- print(srcf + " does not exist")
- else:
- for file in files:
- if ".nam" in os.path.splitext(file)[1].lower():
- files2copy.append(
- os.path.join(cmppth, os.path.basename(file))
- )
- nf = os.path.join(src, action, os.path.basename(file))
- model_setup(nf, dst, remove_existing=remove_existing)
+ rclose = None
+ for pth in workspace.glob("*.ims"):
+ with open(pth, "r") as f:
+ for line in f:
+ if "inner_rclose" in line.lower():
+ v = float(line.split()[1])
+ if rclose is None:
+ rclose = v
+ else:
+ if v > rclose:
+ rclose = v
break
- return action
+ if rclose is None:
+ return 0.5
+
+ rclose *= 5.0
+ return rclose
def get_input_files(namefile):
@@ -227,7 +129,7 @@ def get_input_files(namefile):
if line.strip()[0] in ["#", "!"]:
continue
ext = os.path.splitext(ll[2])[1]
- if ext.lower() not in ignore_ext:
+ if ext.lower() not in IGNORE_EXTENSIONS:
if len(ll) > 3:
if "replace" in ll[3].lower():
continue
@@ -242,7 +144,6 @@ def get_input_files(namefile):
try:
f = open(fname, "r")
for line in f:
-
# Skip invalid lines
ll = line.strip().split()
if len(ll) < 2:
@@ -259,7 +160,7 @@ def get_input_files(namefile):
otherfiles.append(stmp)
break
except:
- print(fname + " does not exist")
+ print(f"{fname} does not exist")
filelist = filelist + otherfiles
@@ -302,132 +203,38 @@ def get_namefiles(pth, exclude=None):
return namefiles
-def get_sim_name(namefiles, rootpth=None):
- """Get simulation name.
-
- Parameters
- ----------
- namefiles : str or list of strings
- path(s) to MODFLOW-based model name files
- rootpth : str
- optional root directory path (default is None)
-
- Returns
- -------
- simname : list
- list of namefiles without the file extension
-
+def get_matching_files(
+ workspace: Union[str, os.PathLike], extensions: Union[str, Iterator[str]]
+) -> Iterator[Path]:
"""
- if isinstance(namefiles, str):
- namefiles = [namefiles]
- sim_name = []
- for namefile in namefiles:
- t = namefile.split(os.sep)
- if rootpth is None:
- idx = -1
- else:
- idx = t.index(os.path.split(rootpth)[1])
-
- # build dst with everything after the rootpth and before
- # the namefile file name.
- dst = ""
- if idx < len(t):
- for d in t[idx + 1 : -1]:
- dst += f"{d}_"
-
- # add namefile basename without extension
- dst += t[-1].replace(".nam", "")
- sim_name.append(dst)
-
- return sim_name
-
-
-def setup_mf6(
- src, dst, mfnamefile="mfsim.nam", extrafiles=None, remove_existing=True
-):
- """Copy all of the MODFLOW 6 input files from the src directory to the dst
- directory.
-
+ Get MF6 regression files in the specified workspace,
+ optionally filtering by one or more file extensions.
Parameters
----------
- src : src
- directory path with original MODFLOW 6 input files
- dst : str
- directory path that original MODFLOW 6 input files will be copied to
- mfnamefile : str
- optional MODFLOW 6 simulation name file (default is mfsim.nam)
- extrafiles : bool
- boolean indicating if extra files should be included (default is None)
- remove_existing : bool
- boolean indicating if existing file in dst should be removed (default
- is True)
-
+ workspace : str or PathLike
+ MODFLOW 6 simulation workspace path
+ extensions : str or list of str
+ file extensions to filter
Returns
-------
- mf6inp : list
- list of MODFLOW 6 input files
- mf6outp : list
- list of MODFLOW 6 output files
-
+ An iterator of regression files found
"""
- # Create the destination folder
- create_dir = False
- if os.path.exists(dst):
- if remove_existing:
- print("Removing folder " + dst)
- shutil.rmtree(dst)
- create_dir = True
- else:
- create_dir = True
- if create_dir:
- os.makedirs(dst)
+ workspace = Path(workspace).expanduser().absolute()
+ if isinstance(extensions, str):
+ extensions = [extensions]
- # Make list of files to copy
- fname = os.path.join(src, mfnamefile)
- fname = os.path.abspath(fname)
- mf6inp, mf6outp = get_mf6_files(fname)
- files2copy = [mfnamefile] + mf6inp
-
- # determine if there are any .ex files
- exinp = []
- for f in mf6outp:
- ext = os.path.splitext(f)[1]
- if ext.lower() == ".hds":
- pth = os.path.join(src, f + ".ex")
- if os.path.isfile(pth):
- exinp.append(f + ".ex")
- if len(exinp) > 0:
- files2copy += exinp
- if extrafiles is not None:
- files2copy += extrafiles
-
- # Copy the files
- for f in files2copy:
- srcf = os.path.join(src, f)
- dstf = os.path.join(dst, f)
-
- # Check to see if dstf is going into a subfolder, and create that
- # subfolder if it doesn't exist
- sf = os.path.dirname(dstf)
- if not os.path.isdir(sf):
- try:
- os.mkdir(sf)
- except:
- print("Could not make " + sf)
-
- # Now copy the file
- if os.path.exists(srcf):
- print("Copy file '" + srcf + "' -> '" + dstf + "'")
- shutil.copy(srcf, dstf)
- else:
- print(srcf + " does not exist")
-
- return mf6inp, mf6outp
+ for ext in extensions:
+ for file in workspace.glob(f"*.{ext}"):
+ yield file
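+
+# Illustrative sketch (hypothetical workspace): collect head and budget files
+# produced by a simulation run, e.g. to select files for comparison.
+#
+#   hds_files = list(get_matching_files("temp/test_gwf_example", "hds"))
+#   out_files = list(get_matching_files("temp/test_gwf_example", ["hds", "cbc"]))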
def get_mf6_comparison(src):
- """Determine comparison type for MODFLOW 6 simulation.
+ """
+ Determine the comparison type for a MODFLOW 6 simulation
+    based on the comparison subdirectories present in the
+    simulation workspace. Some programs take precedence over
+    others according to the order specified in `COMPARE_PROGRAMS`.
Parameters
----------
@@ -440,210 +247,36 @@ def get_mf6_comparison(src):
comparison type
"""
- action = None
- # Possible comparison - the order matters
- optcomp = (
- "compare",
- ".cmp",
- "mf2005",
- "mf2005.cmp",
- "mfnwt",
- "mfnwt.cmp",
- "mfusg",
- "mfusg.cmp",
- "mflgr",
- "mflgr.cmp",
- "libmf6",
- "libmf6.cmp",
- "mf6",
- "mf6.cmp",
- )
- # Construct src pth from namefile
- action = None
+
for _, dirs, _ in os.walk(src):
dl = [d.lower() for d in dirs]
- for oc in optcomp:
- if any(oc in s for s in dl):
- action = oc
- break
- return action
+ for pattern in COMPARE_PROGRAMS:
+ if any(pattern in s for s in dl):
+ return pattern
-def setup_mf6_comparison(src, dst, remove_existing=True):
- """Setup comparision for MODFLOW 6 simulation.
+def get_mf6_files(namefile, verbose=False):
+ """Get all MODFLOW 6 input and output files in this simulation.
Parameters
----------
- src : src
- directory path with original MODFLOW 6 input files
- dst : str
- directory path that original MODFLOW 6 input files will be copied to
- remove_existing : bool
- boolean indicating if existing file in dst should be removed (default
- is True)
-
- Returns
- -------
- action : str
- comparison type
-
- """
- # get the type of comparison to use (compare, mf2005, etc.)
- action = get_mf6_comparison(src)
-
- if action is not None:
- dst = os.path.join(dst, f"{action}")
- if not os.path.isdir(dst):
- try:
- os.mkdir(dst)
- except:
- print("Could not make " + dst)
- # clean directory
- else:
- print(f"cleaning...{dst}")
- for root, dirs, files in os.walk(dst):
- for f in files:
- tpth = os.path.join(root, f)
- print(f" removing...{tpth}")
- os.remove(tpth)
- for d in dirs:
- tdir = os.path.join(root, d)
- print(f" removing...{tdir}")
- shutil.rmtree(tdir)
- # copy files
- cmppth = os.path.join(src, action)
- files = os.listdir(cmppth)
- files2copy = []
- if action.lower() == "compare" or action.lower() == ".cmp":
- for file in files:
- if ".cmp" in os.path.splitext(file)[1].lower():
- files2copy.append(os.path.join(cmppth, file))
- for srcf in files2copy:
- f = os.path.basename(srcf)
- dstf = os.path.join(dst, f)
- # Now copy the file
- if os.path.exists(srcf):
- print("Copy file '" + srcf + "' -> '" + dstf + "'")
- shutil.copy(srcf, dstf)
- else:
- print(srcf + " does not exist")
- else:
- if "mf6" in action.lower():
- for file in files:
- if "mfsim.nam" in file.lower():
- srcf = os.path.join(cmppth, os.path.basename(file))
- files2copy.append(srcf)
- srcdir = os.path.join(src, action)
- setup_mf6(srcdir, dst, remove_existing=remove_existing)
- break
- else:
- for file in files:
- if ".nam" in os.path.splitext(file)[1].lower():
- srcf = os.path.join(cmppth, os.path.basename(file))
- files2copy.append(srcf)
- nf = os.path.join(src, action, os.path.basename(file))
- model_setup(nf, dst, remove_existing=remove_existing)
- break
-
- return action
-
-
-def get_mf6_nper(tdisfile):
- """Return the number of stress periods in the MODFLOW 6 model.
-
- Parameters
- ----------
- tdisfile : str
- path to the TDIS file
-
- Returns
- -------
- nper : int
- number of stress periods in the simulation
-
- """
- with open(tdisfile, "r") as f:
- lines = f.readlines()
- line = [line for line in lines if "NPER" in line.upper()][0]
- nper = line.strip().split()[1]
- return nper
-
-
-def get_mf6_mshape(disfile):
- """Return the shape of the MODFLOW 6 model.
-
- Parameters
- ----------
- disfile : str
- path to a MODFLOW 6 discretization file
-
- Returns
- -------
- mshape : tuple
- tuple with the shape of the MODFLOW 6 model.
-
- """
- with open(disfile, "r") as f:
- lines = f.readlines()
-
- d = {}
- for line in lines:
-
- # Skip over blank and commented lines
- ll = line.strip().split()
- if len(ll) < 2:
- continue
- if line.strip()[0] in ["#", "!"]:
- continue
-
- for key in ["NODES", "NCPL", "NLAY", "NROW", "NCOL"]:
- if ll[0].upper() in key:
- d[key] = int(ll[1])
-
- if "NODES" in d:
- mshape = (d["NODES"],)
- elif "NCPL" in d:
- mshape = (d["NLAY"], d["NCPL"])
- elif "NLAY" in d:
- mshape = (d["NLAY"], d["NROW"], d["NCOL"])
- else:
- print(d)
- raise Exception("Could not determine model shape")
- return mshape
-
-
-def get_mf6_files(mfnamefile):
- """Return a list of all the MODFLOW 6 input and output files in this model.
-
- Parameters
- ----------
- mfnamefile : str
+ namefile : pathlike
path to the MODFLOW 6 simulation name file
Returns
-------
- filelist : list
- list of MODFLOW 6 input files in a simulation
- outplist : list
- list of MODFLOW 6 output files in a simulation
-
+ A tuple of lists of paths (input files, output files)
"""
- srcdir = os.path.dirname(mfnamefile)
- filelist = []
- outplist = []
-
- filekeys = ["TDIS6", "GWF6", "GWT", "GWF6-GWF6", "GWF-GWT", "IMS6"]
- namefilekeys = ["GWF6", "GWT"]
- namefiles = []
-
- with open(mfnamefile) as f:
-
- # Read line and skip comments
- lines = f.readlines()
-
- for line in lines:
-
+ srcdir = os.path.dirname(namefile)
+ mdl_files = []
+ pkg_files = []
+ out_files = []
+ pkg_keys = ["TDIS6", "GWF6", "GWT6", "GWF6-GWF6", "GWF-GWT", "IMS6"]
+ model_keys = ["GWF6", "GWT"]
+
+ # find model and simulation-level package input files in simulation namefile
+ for line in open(namefile).readlines():
# Skip over blank and commented lines
ll = line.strip().split()
if len(ll) < 2:
@@ -651,21 +284,20 @@ def get_mf6_files(mfnamefile):
if line.strip()[0] in ["#", "!"]:
continue
- for key in filekeys:
+ for key in pkg_keys:
if key in ll[0].upper():
fname = ll[1]
- filelist.append(fname)
+ pkg_files.append(fname)
- for key in namefilekeys:
+ for key in model_keys:
if key in ll[0].upper():
fname = ll[1]
- namefiles.append(fname)
+ mdl_files.append(fname)
- # Go through name files and get files
- for namefile in namefiles:
+ # find model-level package input files in model namefiles
+ for namefile in mdl_files:
fname = os.path.join(srcdir, namefile)
- with open(fname, "r") as f:
- lines = f.readlines()
+ lines = open(fname, "r").readlines()
insideblock = False
for line in lines:
@@ -684,31 +316,34 @@ def get_mf6_files(mfnamefile):
continue
if line.strip()[0] in ["#", "!"]:
continue
- filelist.append(ll[1])
+ pkg_files.append(ll[1])
- # Recursively go through every file and look for other files to copy,
- # such as 'OPEN/CLOSE' and 'TIMESERIESFILE'. If found, then
- # add that file to the list of files to copy.
- flist = filelist
- # olist = outplist
+ # Recurse through package input files and look for input or
+ # output file entries, e.g. 'OPEN/CLOSE', 'TIMESERIESFILE'
+ # or similar
+ flist = pkg_files
while True:
olist = []
- flist, olist = _get_mf6_external_files(srcdir, olist, flist)
- # add to filelist
- if len(flist) > 0:
- filelist = filelist + flist
- # add to outplist
- if len(olist) > 0:
- outplist = outplist + olist
+ flist, olist = get_mf6_external_files(srcdir, olist, flist)
+ pkg_files += flist
+ out_files += olist
# terminate loop if no additional files
# if len(flist) < 1 and len(olist) < 1:
if len(flist) < 1:
break
- return filelist, outplist
+ if verbose:
+ from pprint import pprint
+
+ print(f"Found input files for {namefile}:")
+ pprint(pkg_files)
+ print(f"Expecting output files for {namefile}:")
+ pprint(out_files)
+
+ return pkg_files, out_files
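+
+# Illustrative sketch (hypothetical simulation path): collect the input files
+# referenced by a simulation name file and the output files it will produce.
+#
+#   in_files, out_files = get_mf6_files("temp/test_gwf_example/mfsim.nam")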
-def _get_mf6_external_files(srcdir, outplist, files):
+def get_mf6_external_files(srcdir, outplist, files):
"""Get list of external files in a MODFLOW 6 simulation.
Parameters
@@ -731,7 +366,6 @@ def _get_mf6_external_files(srcdir, outplist, files):
try:
f = open(fname, "r")
for line in f:
-
# Skip invalid lines
ll = line.strip().split()
if len(ll) < 2:
@@ -829,7 +463,6 @@ def get_mf6_ftypes(namefile, ftypekeys):
ftypes = []
for line in lines:
-
# Skip over blank and commented lines
ll = line.strip().split()
if len(ll) < 2:
@@ -844,36 +477,264 @@ def get_mf6_ftypes(namefile, ftypekeys):
return ftypes
-def get_mf6_blockdata(f, blockstr):
- """Return list with all non comments between start and end of block
- specified by blockstr.
+def get_regression_files(
+ workspace: os.PathLike, extensions
+) -> Tuple[List[str], List[str]]:
+ if isinstance(extensions, str):
+ extensions = [extensions]
+ files = os.listdir(workspace)
+ files0 = []
+ files1 = []
+ for file_name in files:
+ fpth0 = os.path.join(workspace, file_name)
+ if os.path.isfile(fpth0):
+ for extension in extensions:
+ if file_name.lower().endswith(extension):
+ files0.append(fpth0)
+ fpth1 = os.path.join(
+ workspace, "mf6_regression", file_name
+ )
+ files1.append(fpth1)
+ break
+ return files0, files1
+
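+# Illustrative sketch (hypothetical workspace): pair each head file in a test
+# workspace with its counterpart in the mf6_regression subdirectory.
+#
+#   test_files, regression_files = get_regression_files(
+#       "temp/test_gwf_example", (".hds", ".cbc")
+#   )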
+
+def setup_model(namefile, dst, remove_existing=True, extrafiles=None):
+ """
+ Setup a non-MF6 model test, copying input files to the destination workspace.
Parameters
----------
- f : file object
- open file object
- blockstr : str
- name of block to search
+ namefile : str
+ MODFLOW-based model name file.
+ dst : str
+ destination path for comparison model or file(s)
+ remove_existing : bool
+        boolean indicating if an existing comparison model or file(s) should
+        be replaced (default is True)
+    extrafiles : str or list of str
+        list of extra files to include in the comparison
+
+ """
+ # Construct src pth from namefile or lgr file
+ src = os.path.dirname(namefile)
+
+ # Create the destination folder, if required
+ create_dir = False
+ if os.path.exists(dst):
+ if remove_existing:
+ print(f"Removing directory '{dst}'")
+ shutil.rmtree(dst)
+ create_dir = True
+ else:
+ create_dir = True
+ if create_dir:
+ os.mkdir(dst)
+
+ # determine if a namefile is a lgr control file - get individual
+ # name files out of the lgr control file
+ namefiles = [namefile]
+ ext = os.path.splitext(namefile)[1]
+ if ".lgr" in ext.lower():
+ lines = [line.rstrip("\n") for line in open(namefile)]
+ for line in lines:
+ if len(line) < 1:
+ continue
+ if line[0] == "#":
+ continue
+ t = line.split()
+ if ".nam" in t[0].lower():
+ fpth = os.path.join(src, t[0])
+ namefiles.append(fpth)
+
+ # Make list of files to copy
+ files2copy = []
+ for fpth in namefiles:
+ files2copy.append(os.path.basename(fpth))
+ ext = os.path.splitext(fpth)[1]
+ # copy additional files contained in the name file and
+ # associated package files
+ if ext.lower() == ".nam":
+ fname = os.path.abspath(fpth)
+ files2copy = files2copy + get_input_files(fname)
+
+ if extrafiles is not None:
+ if isinstance(extrafiles, str):
+ extrafiles = [extrafiles]
+ for fl in extrafiles:
+ files2copy.append(os.path.basename(fl))
+
+ # Copy the files
+ for f in files2copy:
+ srcf = os.path.join(src, f)
+ dstf = os.path.join(dst, f)
+
+ # Check to see if dstf is going into a subfolder, and create that
+ # subfolder if it doesn't exist
+ sf = os.path.dirname(dstf)
+ if not os.path.isdir(sf):
+ os.makedirs(sf)
+
+ # Now copy the file
+ if os.path.exists(srcf):
+ print(f"Copying file '{srcf}' -> '{dstf}'")
+ shutil.copy(srcf, dstf)
+ else:
+ print(f"{srcf} does not exist")
+
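+# Illustrative sketch (hypothetical paths): copy a non-MF6 comparison model's
+# input files into a fresh test workspace before running it.
+#
+#   setup_model("models/mf2005_compare/model.nam", "temp/compare/mf2005")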
+
+def setup_mf6(
+ src, dst, mfnamefile="mfsim.nam", extrafiles=None, remove_existing=True
+):
+ """
+ Setup an MF6 simulation test, copying input files from the source
+ to the destination workspace.
+
+ Parameters
+ ----------
+    src : str
+ directory path with original MODFLOW 6 input files
+ dst : str
+ directory path that original MODFLOW 6 input files will be copied to
+ mfnamefile : str
+ optional MODFLOW 6 simulation name file (default is mfsim.nam)
+    extrafiles : str or list of str
+        list of extra files to include in the copy (default is None)
+    remove_existing : bool
+        boolean indicating if existing files in dst should be removed (default
+        is True)
Returns
-------
- data : list
- list of data in specified block
+ mf6inp : list
+ list of MODFLOW 6 input files
+ mf6outp : list
+ list of MODFLOW 6 output files
"""
- data = []
- # find beginning of block
- for line in f:
- if line[0] != "#":
- t = line.split()
- if t[0].lower() == "begin" and t[1].lower() == blockstr.lower():
+ # Create the destination folder
+ create_dir = False
+ if os.path.exists(dst):
+ if remove_existing:
+ print(f"Removing {dst}")
+ shutil.rmtree(dst)
+ create_dir = True
+ else:
+ create_dir = True
+ if create_dir:
+ os.makedirs(dst)
+
+ # Make list of files to copy
+ fname = os.path.join(src, mfnamefile)
+ fname = os.path.abspath(fname)
+ mf6inp, mf6outp = get_mf6_files(fname)
+ files2copy = [mfnamefile] + mf6inp
+
+ # determine if there are any .ex files
+ exinp = []
+ for f in mf6outp:
+ ext = os.path.splitext(f)[1]
+ if ext.lower() == ".hds":
+ pth = os.path.join(src, f + ".ex")
+ if os.path.isfile(pth):
+ exinp.append(f + ".ex")
+ if len(exinp) > 0:
+ files2copy += exinp
+ if extrafiles is not None:
+ files2copy += extrafiles
+
+ # Copy the files
+ for f in files2copy:
+ srcf = os.path.join(src, f)
+ dstf = os.path.join(dst, f)
+
+ # Check to see if dstf is going into a subfolder, and create that
+ # subfolder if it doesn't exist
+ sf = os.path.dirname(dstf)
+ if not os.path.isdir(sf):
+ try:
+ os.mkdir(sf)
+ except:
+                print(f"Could not create directory '{sf}'")
+
+ # Now copy the file
+ if os.path.exists(srcf):
+ print(f"Copying file '{srcf}' -> '{dstf}'")
+ shutil.copy(srcf, dstf)
+ else:
+ print(f"{srcf} does not exist")
+
+ return mf6inp, mf6outp
+
+
+def setup_mf6_comparison(
+ src, dst, cmp_exe="mf6", overwrite=True, verbose=False
+):
+    """Setup an output comparison for a MODFLOW 6 simulation.
+
+ Parameters
+ ----------
+ src : path-like
+ Directory with original MODFLOW 6 input files.
+ dst : path-like
+ Directory to copy MODFLOW 6 input files to.
+ cmp_exe : str or PathLike, optional
+        Program to compare against; for supported programs see
+        `COMPARE_PROGRAMS`.
+ overwrite : bool, optional
+ Whether to overwrite the destination directory if it exists (default is True).
+ verbose : bool, optional
+ Whether to show verbose output
+
+ Returns
+ -------
+ action : str
+ comparison type (also the name of the comparison subdirectory in dst)
+
+ """
+
+ if cmp_exe is None:
+        warn("No comparison executable provided, aborting")
+ return
+
+ # create and/or clean dest dir if needed
+ dst = Path(dst) / cmp_exe
+ dst.mkdir(exist_ok=True)
+ dls = list(os.walk(dst))
+ if overwrite and any(dls):
+ if verbose:
+ print(f"Cleaning directory '{dst}'")
+ for root, dirs, files in dls:
+ for f in files:
+ tpth = os.path.join(root, f)
+ if verbose:
+                    print(f"Removing file '{tpth}'")
+ os.remove(tpth)
+ for d in dirs:
+ tdir = os.path.join(root, d)
+ if verbose:
+                    print(f"Removing directory '{tdir}'")
+ shutil.rmtree(tdir)
+ else:
+ raise ValueError(f"Destination exists but overwrite disabled: {dst}")
+
+ # copy files
+ cmppth = os.path.join(src, cmp_exe)
+ files = os.listdir(cmppth)
+ files2copy = []
+ if "mf6" in cmp_exe.lower():
+ for file in files:
+ if "mfsim.nam" in file.lower():
+ srcf = os.path.join(cmppth, os.path.basename(file))
+ files2copy.append(srcf)
+ srcdir = os.path.join(src, cmp_exe)
+ setup_mf6(srcdir, dst, remove_existing=overwrite)
break
- for line in f:
- if line[0] != "#":
- t = line.split()
- if t[0].lower() == "end" and t[1].lower() == blockstr.lower():
+ else:
+ for file in files:
+ if ".nam" in os.path.splitext(file)[1].lower():
+ srcf = os.path.join(cmppth, os.path.basename(file))
+ files2copy.append(srcf)
+ nf = os.path.join(src, cmp_exe, os.path.basename(file))
+ setup_model(nf, dst, remove_existing=overwrite)
break
- else:
- data.append(line.rstrip())
- return data
diff --git a/autotest/conftest.py b/autotest/conftest.py
index 5f0073b18c2..f278abff0bb 100644
--- a/autotest/conftest.py
+++ b/autotest/conftest.py
@@ -1,51 +1,93 @@
-import platform
+import sys
from pathlib import Path
+from typing import Dict
+from warnings import warn
import pytest
-from modflow_devtools.executables import Executables, build_default_exe_dict
+from modflow_devtools.ostags import get_binary_suffixes
pytest_plugins = ["modflow_devtools.fixtures"]
project_root_path = Path(__file__).resolve().parent.parent
-def should_compare(
- test: str, comparisons: dict, executables: Executables
-) -> bool:
- if test in comparisons.keys():
- dev_ver = Executables.get_version(path=executables.mf6).split(" ")[0]
- reg_ver = Executables.get_version(
- path=executables.mf6_regression
- ).split(" ")[0]
- print(f"MODFLOW 6 development version: {dev_ver}")
- print(f"MODFLOW 6 regression version: {reg_ver}")
- excluded = list(comparisons[test])
- if reg_ver in excluded:
- print(
- f"Regression version {reg_ver} not supported for test {test}, skipping comparison"
- )
- return False
- return True
+_exe_ext, _lib_ext = get_binary_suffixes(sys.platform)
+_binaries_path = project_root_path / "bin"
+_dl_bin_path = _binaries_path / "downloaded"
+_rb_bin_path = _binaries_path / "rebuilt"
+_binaries = {
+ "development": [
+ ("mf6", _binaries_path / f"mf6{_exe_ext}"),
+ ("libmf6", _binaries_path / f"libmf6{_lib_ext}"),
+ ("mf5to6", _binaries_path / f"mf5to6{_exe_ext}"),
+ ("zbud6", _binaries_path / f"zbud6{_exe_ext}"),
+ ],
+ "downloaded": [
+ ("mf2000", _dl_bin_path / f"mf2000{_exe_ext}"),
+ ("mf2005", _dl_bin_path / f"mf2005dbl{_exe_ext}"),
+ ("mfnwt", _dl_bin_path / f"mfnwtdbl{_exe_ext}"),
+ ("mfusg", _dl_bin_path / f"mfusgdbl{_exe_ext}"),
+ ("mflgr", _dl_bin_path / f"mflgrdbl{_exe_ext}"),
+ ("mf2005s", _dl_bin_path / f"mf2005{_exe_ext}"),
+ ("mt3dms", _dl_bin_path / f"mt3dms{_exe_ext}"),
+ ("crt", _dl_bin_path / f"crt{_exe_ext}"),
+ ("gridgen", _dl_bin_path / f"gridgen{_exe_ext}"),
+ ("mp6", _dl_bin_path / f"mp6{_exe_ext}"),
+ ("mp7", _dl_bin_path / f"mp7{_exe_ext}"),
+ ("swtv4", _dl_bin_path / f"swtv4{_exe_ext}"),
+ ("sutra", _dl_bin_path / f"sutra{_exe_ext}"),
+ ("triangle", _dl_bin_path / f"triangle{_exe_ext}"),
+ ("vs2dt", _dl_bin_path / f"vs2dt{_exe_ext}"),
+ ("zonbudusg", _dl_bin_path / f"zonbudusg{_exe_ext}"),
+ ],
+ "rebuilt": [
+ ("mf6_regression", _rb_bin_path / f"mf6{_exe_ext}"),
+ ("libmf6_regression", _rb_bin_path / f"libmf6{_lib_ext}"),
+ ("mf5to6_regression", _rb_bin_path / f"mf5to6{_exe_ext}"),
+ ("zbud6_regression", _rb_bin_path / f"zbud6{_exe_ext}"),
+ ],
+}
@pytest.fixture(scope="session")
def bin_path() -> Path:
- return project_root_path / "bin"
+ return _binaries_path
@pytest.fixture(scope="session")
-def libmf6_path(bin_path) -> Path:
- ext = {
- "Darwin": ".dylib",
- "Linux": ".so",
- "Windows": ".dll",
- }[platform.system()]
- lib_name = bin_path / f"libmf6{ext}"
- return lib_name
-
-
-@pytest.fixture(scope="session")
-def targets(bin_path) -> Executables:
- return Executables(**build_default_exe_dict(bin_path))
+def targets() -> Dict[str, Path]:
+ """
+ Target executables for tests. These include local development builds as
+ well as binaries 1) downloaded from GitHub and 2) rebuilt from the last
+ official release.
+ """
+
+ d = dict()
+ for k, v in _binaries["development"]:
+ # require development binaries
+ assert v.is_file(), f"Couldn't find binary '{k}' expected at: {v}"
+ d[k] = v
+ for k, v in _binaries["downloaded"] + _binaries["rebuilt"]:
+ # downloaded/rebuilt binaries are optional
+ if v.is_file():
+ d[k] = v
+ else:
+ warn(f"Couldn't find binary '{k}' expected at: {v}")
+ return d
+
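+# Illustrative sketch of how a test might consume the fixture (the test body
+# is hypothetical):
+#
+#   def test_mf6_exists(targets):
+#       assert targets["mf6"].is_file()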
+
+def try_get_target(targets: Dict[str, Path], name: str) -> Path:
+ """Try to retrieve the path to a binary. If the binary is a development
+ target and can't be found, an error is raised. Otherwise (if the binary
+ is downloaded or rebuilt) the test is skipped. This is to allow testing
+ without downloaded or rebuilt binaries, e.g. if the network is down."""
+
+ exe = targets.get(name)
+ if exe:
+ return exe
+    elif name in [k for k, _ in _binaries["development"]]:
+ raise ValueError(f"Couldn't find binary '{name}'")
+ else:
+ pytest.skip(f"Couldn't find binary '{name}'")
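+
+# Illustrative sketch (hypothetical test): request an optional binary and let
+# the test skip automatically if it wasn't downloaded or rebuilt.
+#
+#   def test_regression(targets):
+#       mf6_regression = try_get_target(targets, "mf6_regression")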
@pytest.fixture
@@ -55,7 +97,7 @@ def original_regression(request) -> bool:
@pytest.fixture(scope="session")
def markers(pytestconfig) -> str:
- return pytestconfig.getoption('-m')
+ return pytestconfig.getoption("-m")
def pytest_addoption(parser):
@@ -63,13 +105,13 @@ def pytest_addoption(parser):
"--original-regression",
action="store_true",
default=False,
- help="TODO",
+ help="use non-MF6 models for regression tests",
)
parser.addoption(
- "--parallel",
- action="store_true",
- default=False,
- help="include parallel test cases"
+ "--parallel",
+ action="store_true",
+ default=False,
+ help="include parallel test cases",
)
diff --git a/autotest/cross_section_functions.py b/autotest/cross_section_functions.py
index 1b22e137e0d..9bfda231dcd 100644
--- a/autotest/cross_section_functions.py
+++ b/autotest/cross_section_functions.py
@@ -12,7 +12,7 @@ def calculate_rectchan_mannings_discharge(
"""
area = width * depth
- return conversion_factor * area * depth ** mpow * slope ** 0.5 / roughness
+ return conversion_factor * area * depth**mpow * slope**0.5 / roughness
# n-point cross-section functions
@@ -82,7 +82,7 @@ def get_wetted_perimeter(
else:
dlen = 0.0
- return np.sqrt(xlen ** 2.0 + dlen ** 2.0)
+ return np.sqrt(xlen**2.0 + dlen**2.0)
def get_wetted_area(x0, x1, h0, h1, depth):
@@ -257,7 +257,7 @@ def manningsq(
if perimeter > 0.0:
radius = area / perimeter
q += (
- conv * area * radius ** mpow * slope ** 0.5 / roughness[i0]
+ conv * area * radius**mpow * slope**0.5 / roughness[i0]
)
else:
perimeter = wetted_perimeter(x, h, depth)
@@ -265,7 +265,7 @@ def manningsq(
radius = 0.0
if perimeter > 0.0:
radius = area / perimeter
- q = conv * area * radius ** mpow * slope ** 0.5 / roughness[0]
+ q = conv * area * radius**mpow * slope**0.5 / roughness[0]
return q
diff --git a/autotest/data/ex-gwf-bump/bottom.txt b/autotest/data/ex-gwf-bump/bottom.txt
new file mode 100644
index 00000000000..fc7264d51e2
--- /dev/null
+++ b/autotest/data/ex-gwf-bump/bottom.txt
@@ -0,0 +1,52 @@
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 7.145278E-02 1.258685E-01 1.649210E-01 1.884270E-01 1.962749E-01 1.884270E-01 1.649210E-01 1.258685E-01 7.145278E-02 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 7.920820E-02 1.962749E-01 2.988742E-01 3.865433E-01 4.588710E-01 5.155069E-01 5.561688E-01 5.806502E-01 5.888247E-01 5.806502E-01 5.561688E-01 5.155069E-01 4.588710E-01 3.865433E-01 2.988742E-01 1.962749E-01 7.920820E-02 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 1.649210E-01 3.147551E-01 4.508076E-01 5.724826E-01 6.792177E-01 7.704949E-01 8.458526E-01 9.048952E-01 9.473040E-01 9.728448E-01 9.813745E-01 9.728448E-01 9.473040E-01 9.048952E-01 8.458526E-01 7.704949E-01 6.792177E-01 5.724826E-01 4.508076E-01 3.147551E-01 1.649210E-01 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.727501E-01 3.545715E-01 5.236254E-01 6.792177E-01 8.206647E-01 9.473040E-01 1.058507E+00 1.153693E+00 1.232340E+00 1.294000E+00 1.338311E+00 1.365007E+00 1.373924E+00 1.365007E+00 1.338311E+00 1.294000E+00 1.232340E+00 1.153693E+00 1.058507E+00 9.473040E-01 8.206647E-01 6.792177E-01 5.236254E-01 3.545715E-01 1.727501E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.025109E-01 3.147551E-01 5.155069E-01 7.040227E-01 8.795444E-01 1.041309E+00 1.188560E+00 1.320560E+00 1.436603E+00 1.536034E+00 1.618264E+00 1.682781E+00 1.729173E+00 1.757133E+00 1.766474E+00 1.757133E+00 1.729173E+00 1.682781E+00 1.618264E+00 1.536034E+00 1.436603E+00 1.320560E+00 1.188560E+00 1.041309E+00 8.795444E-01 7.040227E-01 5.155069E-01 3.147551E-01 1.025109E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.962749E-01 4.266580E-01 6.462466E-01 8.542639E-01 1.049904E+00 1.232340E+00 1.400731E+00 1.554238E+00 1.692039E+00 1.813339E+00 1.917398E+00 2.003544E+00 2.071192E+00 2.119867E+00 2.149216E+00 2.159024E+00 2.149216E+00 2.119867E+00 2.071192E+00 2.003544E+00 1.917398E+00 1.813339E+00 1.692039E+00 1.554238E+00 1.400731E+00 1.232340E+00 1.049904E+00 8.542639E-01 6.462466E-01 4.266580E-01 1.962749E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 2.592852E-01 5.073954E-01 7.455119E-01 9.728448E-01 1.188560E+00 1.391786E+00 1.581618E+00 1.757133E+00 1.917398E+00 2.061492E+00 2.188521E+00 2.297640E+00 2.388082E+00 2.459174E+00 2.510366E+00 2.541251E+00 2.551574E+00 2.541251E+00 2.510366E+00 2.459174E+00 2.388082E+00 2.297640E+00 2.188521E+00 2.061492E+00 1.917398E+00 1.757133E+00 1.581618E+00 1.391786E+00 1.188560E+00 9.728448E-01 7.455119E-01 5.073954E-01 2.592852E-01 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.733338E-02 2.909435E-01 5.561688E-01 8.122841E-01 1.058507E+00 1.294000E+00 1.517869E+00 1.729173E+00 1.926925E+00 2.110108E+00 2.277686E+00 2.428624E+00 2.561911E+00 2.676584E+00 2.771759E+00 2.846658E+00 2.900641E+00 2.933228E+00 2.944124E+00 2.933228E+00 2.900641E+00 2.846658E+00 2.771759E+00 2.676584E+00 2.561911E+00 2.428624E+00 2.277686E+00 2.110108E+00 1.926925E+00 1.729173E+00 1.517869E+00 1.294000E+00 1.058507E+00 8.122841E-01 5.561688E-01 2.909435E-01 1.733338E-02 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 2.909435E-01 5.724826E-01 8.458526E-01 1.110300E+00 1.365007E+00 1.609087E+00 1.841588E+00 2.061492E+00 2.267728E+00 2.459174E+00 2.634679E+00 2.793080E+00 2.933228E+00 3.054020E+00 3.154432E+00 3.233560E+00 3.290650E+00 3.325138E+00 3.336673E+00 3.325138E+00 3.290650E+00 3.233560E+00 3.154432E+00 3.054020E+00 2.933228E+00 2.793080E+00 2.634679E+00 2.459174E+00 2.267728E+00 2.061492E+00 1.841588E+00 1.609087E+00 1.365007E+00 1.110300E+00 8.458526E-01 5.724826E-01 2.909435E-01 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 2.592852E-01 5.561688E-01 8.458526E-01 1.127632E+00 1.400731E+00 1.664297E+00 1.917398E+00 2.159024E+00 2.388082E+00 2.603405E+00 2.803764E+00 2.987876E+00 3.154432E+00 3.302126E+00 3.429686E+00 3.535923E+00 3.619772E+00 3.680345E+00 3.716968E+00 3.729223E+00 3.716968E+00 3.680345E+00 3.619772E+00 3.535923E+00 3.429686E+00 3.302126E+00 3.154432E+00 2.987876E+00 2.803764E+00 2.603405E+00 2.388082E+00 2.159024E+00 1.917398E+00 1.664297E+00 1.400731E+00 1.127632E+00 8.458526E-01 5.561688E-01 2.592852E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.962749E-01 5.073954E-01 8.122841E-01 1.110300E+00 1.400731E+00 1.682781E+00 1.955571E+00 2.218129E+00 2.469384E+00 2.708172E+00 2.933228E+00 3.143203E+00 3.336673E+00 3.512165E+00 3.668184E+00 3.803263E+00 3.916008E+00 4.005163E+00 4.069664E+00 4.108703E+00 4.121773E+00 4.108703E+00 4.069664E+00 4.005163E+00 3.916008E+00 3.803263E+00 3.668184E+00 3.512165E+00 3.336673E+00 3.143203E+00 2.933228E+00 2.708172E+00 2.469384E+00 2.218129E+00 1.955571E+00 1.682781E+00 1.400731E+00 1.110300E+00 8.122841E-01 5.073954E-01 1.962749E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.025109E-01 4.266580E-01 7.455119E-01 1.058507E+00 1.365007E+00 1.664297E+00 1.955571E+00 2.237930E+00 2.510366E+00 2.771759E+00 3.020869E+00 3.256338E+00 3.476689E+00 3.680345E+00 3.865644E+00 4.030879E+00 4.174348E+00 4.294409E+00 4.389562E+00 4.458528E+00 4.500321E+00 4.514323E+00 4.500321E+00 4.458528E+00 4.389562E+00 4.294409E+00 4.174348E+00 4.030879E+00 3.865644E+00 3.680345E+00 3.476689E+00 3.256338E+00 3.020869E+00 2.771759E+00 2.510366E+00 2.237930E+00 1.955571E+00 1.664297E+00 1.365007E+00 1.058507E+00 7.455119E-01 4.266580E-01 1.025109E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 3.147551E-01 6.462466E-01 9.728448E-01 1.294000E+00 1.609087E+00 1.917398E+00 2.218129E+00 2.510366E+00 2.793080E+00 3.065105E+00 3.325138E+00 3.571724E+00 3.803263E+00 4.018007E+00 4.214092E+00 4.389562E+00 4.542434E+00 4.670765E+00 4.772752E+00 4.846834E+00 4.891797E+00 4.906873E+00 4.891797E+00 4.846834E+00 4.772752E+00 4.670765E+00 4.542434E+00 4.389562E+00 4.214092E+00 4.018007E+00 3.803263E+00 3.571724E+00 3.325138E+00 3.065105E+00 2.793080E+00 2.510366E+00 2.218129E+00 1.917398E+00 1.609087E+00 1.294000E+00 9.728448E-01 6.462466E-01 3.147551E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 1.727501E-01 5.155069E-01 8.542639E-01 1.188560E+00 1.517869E+00 1.841588E+00 2.159024E+00 2.469384E+00 2.771759E+00 3.065105E+00 3.348229E+00 3.619772E+00 3.878196E+00 4.121773E+00 4.348586E+00 4.556544E+00 4.743412E+00 4.906873E+00 5.044614E+00 5.154447E+00 5.234446E+00 5.283094E+00 5.299422E+00 5.283094E+00 5.234446E+00 5.154447E+00 5.044614E+00 4.906873E+00 4.743412E+00 4.556544E+00 4.348586E+00 4.121773E+00 3.878196E+00 3.619772E+00 3.348229E+00 3.065105E+00 2.771759E+00 2.469384E+00 2.159024E+00 1.841588E+00 1.517869E+00 1.188560E+00 8.542639E-01 5.155069E-01 1.727501E-01 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 1.924449E-03 3.545715E-01 7.040227E-01 1.049904E+00 1.391786E+00 1.729173E+00 2.061492E+00 2.388082E+00 2.708172E+00 3.020869E+00 3.325138E+00 3.619772E+00 3.903378E+00 4.174348E+00 4.430839E+00 4.670765E+00 4.891797E+00 5.091385E+00 5.266823E+00 5.415342E+00 5.534264E+00 5.621180E+00 5.674166E+00 5.691972E+00 5.674166E+00 5.621180E+00 5.534264E+00 5.415342E+00 5.266823E+00 5.091385E+00 4.891797E+00 4.670765E+00 4.430839E+00 4.174348E+00 3.903378E+00 3.619772E+00 3.325138E+00 3.020869E+00 2.708172E+00 2.388082E+00 2.061492E+00 1.729173E+00 1.391786E+00 1.049904E+00 7.040227E-01 3.545715E-01 1.924449E-03 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 1.649210E-01 5.236254E-01 8.795444E-01 1.232340E+00 1.581618E+00 1.926925E+00 2.267728E+00 2.603405E+00 2.933228E+00 3.256338E+00 3.571724E+00 3.878196E+00 4.174348E+00 4.458528E+00 4.728803E+00 4.982930E+00 5.218339E+00 5.432142E+00 5.621180E+00 5.782129E+00 5.911680E+00 6.006782E+00 6.064943E+00 6.084522E+00 6.064943E+00 6.006782E+00 5.911680E+00 5.782129E+00 5.621180E+00 5.432142E+00 5.218339E+00 4.982930E+00 4.728803E+00 4.458528E+00 4.174348E+00 3.878196E+00 3.571724E+00 3.256338E+00 2.933228E+00 2.603405E+00 2.267728E+00 1.926925E+00 1.581618E+00 1.232340E+00 8.795444E-01 5.236254E-01 1.649210E-01 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 3.147551E-01 6.792177E-01 1.041309E+00 1.400731E+00 1.757133E+00 2.110108E+00 2.459174E+00 2.803764E+00 3.143203E+00 3.476689E+00 3.803263E+00 4.121773E+00 4.430839E+00 4.728803E+00 5.013677E+00 5.283094E+00 5.534264E+00 5.763945E+00 5.968472E+00 6.143853E+00 6.285965E+00 6.390890E+00 6.455330E+00 6.477072E+00 6.455330E+00 6.390890E+00 6.285965E+00 6.143853E+00 5.968472E+00 5.763945E+00 5.534264E+00 5.283094E+00 5.013677E+00 4.728803E+00 4.430839E+00 4.121773E+00 3.803263E+00 3.476689E+00 3.143203E+00 2.803764E+00 2.459174E+00 2.110108E+00 1.757133E+00 1.400731E+00 1.041309E+00 6.792177E-01 3.147551E-01 0.000000E+00 0.000000E+00
+ 0.000000E+00 7.920820E-02 4.508076E-01 8.206647E-01 1.188560E+00 1.554238E+00 1.917398E+00 2.277686E+00 2.634679E+00 2.987876E+00 3.336673E+00 3.680345E+00 4.018007E+00 4.348586E+00 4.670765E+00 4.982930E+00 5.283094E+00 5.568826E+00 5.837158E+00 6.084522E+00 6.306713E+00 6.498948E+00 6.656073E+00 6.772972E+00 6.845182E+00 6.869622E+00 6.845182E+00 6.772972E+00 6.656073E+00 6.498948E+00 6.306713E+00 6.084522E+00 5.837158E+00 5.568826E+00 5.283094E+00 4.982930E+00 4.670765E+00 4.348586E+00 4.018007E+00 3.680345E+00 3.336673E+00 2.987876E+00 2.634679E+00 2.277686E+00 1.917398E+00 1.554238E+00 1.188560E+00 8.206647E-01 4.508076E-01 7.920820E-02 0.000000E+00
+ 0.000000E+00 1.962749E-01 5.724826E-01 9.473040E-01 1.320560E+00 1.692039E+00 2.061492E+00 2.428624E+00 2.793080E+00 3.154432E+00 3.512165E+00 3.865644E+00 4.214092E+00 4.556544E+00 4.891797E+00 5.218339E+00 5.534264E+00 5.837158E+00 6.123975E+00 6.390890E+00 6.633179E+00 6.845182E+00 7.020450E+00 7.152215E+00 7.234274E+00 7.262171E+00 7.234274E+00 7.152215E+00 7.020450E+00 6.845182E+00 6.633179E+00 6.390890E+00 6.123975E+00 5.837158E+00 5.534264E+00 5.218339E+00 4.891797E+00 4.556544E+00 4.214092E+00 3.865644E+00 3.512165E+00 3.154432E+00 2.793080E+00 2.428624E+00 2.061492E+00 1.692039E+00 1.320560E+00 9.473040E-01 5.724826E-01 1.962749E-01 0.000000E+00
+ 0.000000E+00 2.988742E-01 6.792177E-01 1.058507E+00 1.436603E+00 1.813339E+00 2.188521E+00 2.561911E+00 2.933228E+00 3.302126E+00 3.668184E+00 4.030879E+00 4.389562E+00 4.743412E+00 5.091385E+00 5.432142E+00 5.763945E+00 6.084522E+00 6.390890E+00 6.679124E+00 6.944108E+00 7.179303E+00 7.376716E+00 7.527317E+00 7.622233E+00 7.654721E+00 7.622233E+00 7.527317E+00 7.376716E+00 7.179303E+00 6.944108E+00 6.679124E+00 6.390890E+00 6.084522E+00 5.763945E+00 5.432142E+00 5.091385E+00 4.743412E+00 4.389562E+00 4.030879E+00 3.668184E+00 3.302126E+00 2.933228E+00 2.561911E+00 2.188521E+00 1.813339E+00 1.436603E+00 1.058507E+00 6.792177E-01 2.988742E-01 0.000000E+00
+ 1.924449E-03 3.865433E-01 7.704949E-01 1.153693E+00 1.536034E+00 1.917398E+00 2.297640E+00 2.676584E+00 3.054020E+00 3.429686E+00 3.803263E+00 4.174348E+00 4.542434E+00 4.906873E+00 5.266823E+00 5.621180E+00 5.968472E+00 6.306713E+00 6.633179E+00 6.944108E+00 7.234274E+00 7.496475E+00 7.721081E+00 7.896075E+00 8.008401E+00 8.047271E+00 8.008401E+00 7.896075E+00 7.721081E+00 7.496475E+00 7.234274E+00 6.944108E+00 6.633179E+00 6.306713E+00 5.968472E+00 5.621180E+00 5.266823E+00 4.906873E+00 4.542434E+00 4.174348E+00 3.803263E+00 3.429686E+00 3.054020E+00 2.676584E+00 2.297640E+00 1.917398E+00 1.536034E+00 1.153693E+00 7.704949E-01 3.865433E-01 1.924449E-03
+ 7.145278E-02 4.588710E-01 8.458526E-01 1.232340E+00 1.618264E+00 2.003544E+00 2.388082E+00 2.771759E+00 3.154432E+00 3.535923E+00 3.916008E+00 4.294409E+00 4.670765E+00 5.044614E+00 5.415342E+00 5.782129E+00 6.143853E+00 6.498948E+00 6.845182E+00 7.179303E+00 7.496475E+00 7.789423E+00 8.047271E+00 8.254484E+00 8.391496E+00 8.439821E+00 8.391496E+00 8.254484E+00 8.047271E+00 7.789423E+00 7.496475E+00 7.179303E+00 6.845182E+00 6.498948E+00 6.143853E+00 5.782129E+00 5.415342E+00 5.044614E+00 4.670765E+00 4.294409E+00 3.916008E+00 3.535923E+00 3.154432E+00 2.771759E+00 2.388082E+00 2.003544E+00 1.618264E+00 1.232340E+00 8.458526E-01 4.588710E-01 7.145278E-02
+ 1.258685E-01 5.155069E-01 9.048952E-01 1.294000E+00 1.682781E+00 2.071192E+00 2.459174E+00 2.846658E+00 3.233560E+00 3.619772E+00 4.005163E+00 4.389562E+00 4.772752E+00 5.154447E+00 5.534264E+00 5.911680E+00 6.285965E+00 6.656073E+00 7.020450E+00 7.376716E+00 7.721081E+00 8.047271E+00 8.344572E+00 8.594662E+00 8.768668E+00 8.832371E+00 8.768668E+00 8.594662E+00 8.344572E+00 8.047271E+00 7.721081E+00 7.376716E+00 7.020450E+00 6.656073E+00 6.285965E+00 5.911680E+00 5.534264E+00 5.154447E+00 4.772752E+00 4.389562E+00 4.005163E+00 3.619772E+00 3.233560E+00 2.846658E+00 2.459174E+00 2.071192E+00 1.682781E+00 1.294000E+00 9.048952E-01 5.155069E-01 1.258685E-01
+ 1.649210E-01 5.561688E-01 9.473040E-01 1.338311E+00 1.729173E+00 2.119867E+00 2.510366E+00 2.900641E+00 3.290650E+00 3.680345E+00 4.069664E+00 4.458528E+00 4.846834E+00 5.234446E+00 5.621180E+00 6.006782E+00 6.390890E+00 6.772972E+00 7.152215E+00 7.527317E+00 7.896075E+00 8.254484E+00 8.594662E+00 8.899721E+00 9.132252E+00 9.224920E+00 9.132252E+00 8.899721E+00 8.594662E+00 8.254484E+00 7.896075E+00 7.527317E+00 7.152215E+00 6.772972E+00 6.390890E+00 6.006782E+00 5.621180E+00 5.234446E+00 4.846834E+00 4.458528E+00 4.069664E+00 3.680345E+00 3.290650E+00 2.900641E+00 2.510366E+00 2.119867E+00 1.729173E+00 1.338311E+00 9.473040E-01 5.561688E-01 1.649210E-01
+ 1.884270E-01 5.806502E-01 9.728448E-01 1.365007E+00 1.757133E+00 2.149216E+00 2.541251E+00 2.933228E+00 3.325138E+00 3.716968E+00 4.108703E+00 4.500321E+00 4.891797E+00 5.283094E+00 5.674166E+00 6.064943E+00 6.455330E+00 6.845182E+00 7.234274E+00 7.622233E+00 8.008401E+00 8.391496E+00 8.768668E+00 9.132252E+00 9.454871E+00 9.617470E+00 9.454871E+00 9.132252E+00 8.768668E+00 8.391496E+00 8.008401E+00 7.622233E+00 7.234274E+00 6.845182E+00 6.455330E+00 6.064943E+00 5.674166E+00 5.283094E+00 4.891797E+00 4.500321E+00 4.108703E+00 3.716968E+00 3.325138E+00 2.933228E+00 2.541251E+00 2.149216E+00 1.757133E+00 1.365007E+00 9.728448E-01 5.806502E-01 1.884270E-01
+ 1.962749E-01 5.888247E-01 9.813745E-01 1.373924E+00 1.766474E+00 2.159024E+00 2.551574E+00 2.944124E+00 3.336673E+00 3.729223E+00 4.121773E+00 4.514323E+00 4.906873E+00 5.299422E+00 5.691972E+00 6.084522E+00 6.477072E+00 6.869622E+00 7.262171E+00 7.654721E+00 8.047271E+00 8.439821E+00 8.832371E+00 9.224920E+00 9.617470E+00 9.990000E+00 9.617470E+00 9.224920E+00 8.832371E+00 8.439821E+00 8.047271E+00 7.654721E+00 7.262171E+00 6.869622E+00 6.477072E+00 6.084522E+00 5.691972E+00 5.299422E+00 4.906873E+00 4.514323E+00 4.121773E+00 3.729223E+00 3.336673E+00 2.944124E+00 2.551574E+00 2.159024E+00 1.766474E+00 1.373924E+00 9.813745E-01 5.888247E-01 1.962749E-01
+ 1.884270E-01 5.806502E-01 9.728448E-01 1.365007E+00 1.757133E+00 2.149216E+00 2.541251E+00 2.933228E+00 3.325138E+00 3.716968E+00 4.108703E+00 4.500321E+00 4.891797E+00 5.283094E+00 5.674166E+00 6.064943E+00 6.455330E+00 6.845182E+00 7.234274E+00 7.622233E+00 8.008401E+00 8.391496E+00 8.768668E+00 9.132252E+00 9.454871E+00 9.617470E+00 9.454871E+00 9.132252E+00 8.768668E+00 8.391496E+00 8.008401E+00 7.622233E+00 7.234274E+00 6.845182E+00 6.455330E+00 6.064943E+00 5.674166E+00 5.283094E+00 4.891797E+00 4.500321E+00 4.108703E+00 3.716968E+00 3.325138E+00 2.933228E+00 2.541251E+00 2.149216E+00 1.757133E+00 1.365007E+00 9.728448E-01 5.806502E-01 1.884270E-01
+ 1.649210E-01 5.561688E-01 9.473040E-01 1.338311E+00 1.729173E+00 2.119867E+00 2.510366E+00 2.900641E+00 3.290650E+00 3.680345E+00 4.069664E+00 4.458528E+00 4.846834E+00 5.234446E+00 5.621180E+00 6.006782E+00 6.390890E+00 6.772972E+00 7.152215E+00 7.527317E+00 7.896075E+00 8.254484E+00 8.594662E+00 8.899721E+00 9.132252E+00 9.224920E+00 9.132252E+00 8.899721E+00 8.594662E+00 8.254484E+00 7.896075E+00 7.527317E+00 7.152215E+00 6.772972E+00 6.390890E+00 6.006782E+00 5.621180E+00 5.234446E+00 4.846834E+00 4.458528E+00 4.069664E+00 3.680345E+00 3.290650E+00 2.900641E+00 2.510366E+00 2.119867E+00 1.729173E+00 1.338311E+00 9.473040E-01 5.561688E-01 1.649210E-01
+ 1.258685E-01 5.155069E-01 9.048952E-01 1.294000E+00 1.682781E+00 2.071192E+00 2.459174E+00 2.846658E+00 3.233560E+00 3.619772E+00 4.005163E+00 4.389562E+00 4.772752E+00 5.154447E+00 5.534264E+00 5.911680E+00 6.285965E+00 6.656073E+00 7.020450E+00 7.376716E+00 7.721081E+00 8.047271E+00 8.344572E+00 8.594662E+00 8.768668E+00 8.832371E+00 8.768668E+00 8.594662E+00 8.344572E+00 8.047271E+00 7.721081E+00 7.376716E+00 7.020450E+00 6.656073E+00 6.285965E+00 5.911680E+00 5.534264E+00 5.154447E+00 4.772752E+00 4.389562E+00 4.005163E+00 3.619772E+00 3.233560E+00 2.846658E+00 2.459174E+00 2.071192E+00 1.682781E+00 1.294000E+00 9.048952E-01 5.155069E-01 1.258685E-01
+ 7.145278E-02 4.588710E-01 8.458526E-01 1.232340E+00 1.618264E+00 2.003544E+00 2.388082E+00 2.771759E+00 3.154432E+00 3.535923E+00 3.916008E+00 4.294409E+00 4.670765E+00 5.044614E+00 5.415342E+00 5.782129E+00 6.143853E+00 6.498948E+00 6.845182E+00 7.179303E+00 7.496475E+00 7.789423E+00 8.047271E+00 8.254484E+00 8.391496E+00 8.439821E+00 8.391496E+00 8.254484E+00 8.047271E+00 7.789423E+00 7.496475E+00 7.179303E+00 6.845182E+00 6.498948E+00 6.143853E+00 5.782129E+00 5.415342E+00 5.044614E+00 4.670765E+00 4.294409E+00 3.916008E+00 3.535923E+00 3.154432E+00 2.771759E+00 2.388082E+00 2.003544E+00 1.618264E+00 1.232340E+00 8.458526E-01 4.588710E-01 7.145278E-02
+ 1.924449E-03 3.865433E-01 7.704949E-01 1.153693E+00 1.536034E+00 1.917398E+00 2.297640E+00 2.676584E+00 3.054020E+00 3.429686E+00 3.803263E+00 4.174348E+00 4.542434E+00 4.906873E+00 5.266823E+00 5.621180E+00 5.968472E+00 6.306713E+00 6.633179E+00 6.944108E+00 7.234274E+00 7.496475E+00 7.721081E+00 7.896075E+00 8.008401E+00 8.047271E+00 8.008401E+00 7.896075E+00 7.721081E+00 7.496475E+00 7.234274E+00 6.944108E+00 6.633179E+00 6.306713E+00 5.968472E+00 5.621180E+00 5.266823E+00 4.906873E+00 4.542434E+00 4.174348E+00 3.803263E+00 3.429686E+00 3.054020E+00 2.676584E+00 2.297640E+00 1.917398E+00 1.536034E+00 1.153693E+00 7.704949E-01 3.865433E-01 1.924449E-03
+ 0.000000E+00 2.988742E-01 6.792177E-01 1.058507E+00 1.436603E+00 1.813339E+00 2.188521E+00 2.561911E+00 2.933228E+00 3.302126E+00 3.668184E+00 4.030879E+00 4.389562E+00 4.743412E+00 5.091385E+00 5.432142E+00 5.763945E+00 6.084522E+00 6.390890E+00 6.679124E+00 6.944108E+00 7.179303E+00 7.376716E+00 7.527317E+00 7.622233E+00 7.654721E+00 7.622233E+00 7.527317E+00 7.376716E+00 7.179303E+00 6.944108E+00 6.679124E+00 6.390890E+00 6.084522E+00 5.763945E+00 5.432142E+00 5.091385E+00 4.743412E+00 4.389562E+00 4.030879E+00 3.668184E+00 3.302126E+00 2.933228E+00 2.561911E+00 2.188521E+00 1.813339E+00 1.436603E+00 1.058507E+00 6.792177E-01 2.988742E-01 0.000000E+00
+ 0.000000E+00 1.962749E-01 5.724826E-01 9.473040E-01 1.320560E+00 1.692039E+00 2.061492E+00 2.428624E+00 2.793080E+00 3.154432E+00 3.512165E+00 3.865644E+00 4.214092E+00 4.556544E+00 4.891797E+00 5.218339E+00 5.534264E+00 5.837158E+00 6.123975E+00 6.390890E+00 6.633179E+00 6.845182E+00 7.020450E+00 7.152215E+00 7.234274E+00 7.262171E+00 7.234274E+00 7.152215E+00 7.020450E+00 6.845182E+00 6.633179E+00 6.390890E+00 6.123975E+00 5.837158E+00 5.534264E+00 5.218339E+00 4.891797E+00 4.556544E+00 4.214092E+00 3.865644E+00 3.512165E+00 3.154432E+00 2.793080E+00 2.428624E+00 2.061492E+00 1.692039E+00 1.320560E+00 9.473040E-01 5.724826E-01 1.962749E-01 0.000000E+00
+ 0.000000E+00 7.920820E-02 4.508076E-01 8.206647E-01 1.188560E+00 1.554238E+00 1.917398E+00 2.277686E+00 2.634679E+00 2.987876E+00 3.336673E+00 3.680345E+00 4.018007E+00 4.348586E+00 4.670765E+00 4.982930E+00 5.283094E+00 5.568826E+00 5.837158E+00 6.084522E+00 6.306713E+00 6.498948E+00 6.656073E+00 6.772972E+00 6.845182E+00 6.869622E+00 6.845182E+00 6.772972E+00 6.656073E+00 6.498948E+00 6.306713E+00 6.084522E+00 5.837158E+00 5.568826E+00 5.283094E+00 4.982930E+00 4.670765E+00 4.348586E+00 4.018007E+00 3.680345E+00 3.336673E+00 2.987876E+00 2.634679E+00 2.277686E+00 1.917398E+00 1.554238E+00 1.188560E+00 8.206647E-01 4.508076E-01 7.920820E-02 0.000000E+00
+ 0.000000E+00 0.000000E+00 3.147551E-01 6.792177E-01 1.041309E+00 1.400731E+00 1.757133E+00 2.110108E+00 2.459174E+00 2.803764E+00 3.143203E+00 3.476689E+00 3.803263E+00 4.121773E+00 4.430839E+00 4.728803E+00 5.013677E+00 5.283094E+00 5.534264E+00 5.763945E+00 5.968472E+00 6.143853E+00 6.285965E+00 6.390890E+00 6.455330E+00 6.477072E+00 6.455330E+00 6.390890E+00 6.285965E+00 6.143853E+00 5.968472E+00 5.763945E+00 5.534264E+00 5.283094E+00 5.013677E+00 4.728803E+00 4.430839E+00 4.121773E+00 3.803263E+00 3.476689E+00 3.143203E+00 2.803764E+00 2.459174E+00 2.110108E+00 1.757133E+00 1.400731E+00 1.041309E+00 6.792177E-01 3.147551E-01 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 1.649210E-01 5.236254E-01 8.795444E-01 1.232340E+00 1.581618E+00 1.926925E+00 2.267728E+00 2.603405E+00 2.933228E+00 3.256338E+00 3.571724E+00 3.878196E+00 4.174348E+00 4.458528E+00 4.728803E+00 4.982930E+00 5.218339E+00 5.432142E+00 5.621180E+00 5.782129E+00 5.911680E+00 6.006782E+00 6.064943E+00 6.084522E+00 6.064943E+00 6.006782E+00 5.911680E+00 5.782129E+00 5.621180E+00 5.432142E+00 5.218339E+00 4.982930E+00 4.728803E+00 4.458528E+00 4.174348E+00 3.878196E+00 3.571724E+00 3.256338E+00 2.933228E+00 2.603405E+00 2.267728E+00 1.926925E+00 1.581618E+00 1.232340E+00 8.795444E-01 5.236254E-01 1.649210E-01 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 1.924449E-03 3.545715E-01 7.040227E-01 1.049904E+00 1.391786E+00 1.729173E+00 2.061492E+00 2.388082E+00 2.708172E+00 3.020869E+00 3.325138E+00 3.619772E+00 3.903378E+00 4.174348E+00 4.430839E+00 4.670765E+00 4.891797E+00 5.091385E+00 5.266823E+00 5.415342E+00 5.534264E+00 5.621180E+00 5.674166E+00 5.691972E+00 5.674166E+00 5.621180E+00 5.534264E+00 5.415342E+00 5.266823E+00 5.091385E+00 4.891797E+00 4.670765E+00 4.430839E+00 4.174348E+00 3.903378E+00 3.619772E+00 3.325138E+00 3.020869E+00 2.708172E+00 2.388082E+00 2.061492E+00 1.729173E+00 1.391786E+00 1.049904E+00 7.040227E-01 3.545715E-01 1.924449E-03 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 1.727501E-01 5.155069E-01 8.542639E-01 1.188560E+00 1.517869E+00 1.841588E+00 2.159024E+00 2.469384E+00 2.771759E+00 3.065105E+00 3.348229E+00 3.619772E+00 3.878196E+00 4.121773E+00 4.348586E+00 4.556544E+00 4.743412E+00 4.906873E+00 5.044614E+00 5.154447E+00 5.234446E+00 5.283094E+00 5.299422E+00 5.283094E+00 5.234446E+00 5.154447E+00 5.044614E+00 4.906873E+00 4.743412E+00 4.556544E+00 4.348586E+00 4.121773E+00 3.878196E+00 3.619772E+00 3.348229E+00 3.065105E+00 2.771759E+00 2.469384E+00 2.159024E+00 1.841588E+00 1.517869E+00 1.188560E+00 8.542639E-01 5.155069E-01 1.727501E-01 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 3.147551E-01 6.462466E-01 9.728448E-01 1.294000E+00 1.609087E+00 1.917398E+00 2.218129E+00 2.510366E+00 2.793080E+00 3.065105E+00 3.325138E+00 3.571724E+00 3.803263E+00 4.018007E+00 4.214092E+00 4.389562E+00 4.542434E+00 4.670765E+00 4.772752E+00 4.846834E+00 4.891797E+00 4.906873E+00 4.891797E+00 4.846834E+00 4.772752E+00 4.670765E+00 4.542434E+00 4.389562E+00 4.214092E+00 4.018007E+00 3.803263E+00 3.571724E+00 3.325138E+00 3.065105E+00 2.793080E+00 2.510366E+00 2.218129E+00 1.917398E+00 1.609087E+00 1.294000E+00 9.728448E-01 6.462466E-01 3.147551E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.025109E-01 4.266580E-01 7.455119E-01 1.058507E+00 1.365007E+00 1.664297E+00 1.955571E+00 2.237930E+00 2.510366E+00 2.771759E+00 3.020869E+00 3.256338E+00 3.476689E+00 3.680345E+00 3.865644E+00 4.030879E+00 4.174348E+00 4.294409E+00 4.389562E+00 4.458528E+00 4.500321E+00 4.514323E+00 4.500321E+00 4.458528E+00 4.389562E+00 4.294409E+00 4.174348E+00 4.030879E+00 3.865644E+00 3.680345E+00 3.476689E+00 3.256338E+00 3.020869E+00 2.771759E+00 2.510366E+00 2.237930E+00 1.955571E+00 1.664297E+00 1.365007E+00 1.058507E+00 7.455119E-01 4.266580E-01 1.025109E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.962749E-01 5.073954E-01 8.122841E-01 1.110300E+00 1.400731E+00 1.682781E+00 1.955571E+00 2.218129E+00 2.469384E+00 2.708172E+00 2.933228E+00 3.143203E+00 3.336673E+00 3.512165E+00 3.668184E+00 3.803263E+00 3.916008E+00 4.005163E+00 4.069664E+00 4.108703E+00 4.121773E+00 4.108703E+00 4.069664E+00 4.005163E+00 3.916008E+00 3.803263E+00 3.668184E+00 3.512165E+00 3.336673E+00 3.143203E+00 2.933228E+00 2.708172E+00 2.469384E+00 2.218129E+00 1.955571E+00 1.682781E+00 1.400731E+00 1.110300E+00 8.122841E-01 5.073954E-01 1.962749E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 2.592852E-01 5.561688E-01 8.458526E-01 1.127632E+00 1.400731E+00 1.664297E+00 1.917398E+00 2.159024E+00 2.388082E+00 2.603405E+00 2.803764E+00 2.987876E+00 3.154432E+00 3.302126E+00 3.429686E+00 3.535923E+00 3.619772E+00 3.680345E+00 3.716968E+00 3.729223E+00 3.716968E+00 3.680345E+00 3.619772E+00 3.535923E+00 3.429686E+00 3.302126E+00 3.154432E+00 2.987876E+00 2.803764E+00 2.603405E+00 2.388082E+00 2.159024E+00 1.917398E+00 1.664297E+00 1.400731E+00 1.127632E+00 8.458526E-01 5.561688E-01 2.592852E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 2.909435E-01 5.724826E-01 8.458526E-01 1.110300E+00 1.365007E+00 1.609087E+00 1.841588E+00 2.061492E+00 2.267728E+00 2.459174E+00 2.634679E+00 2.793080E+00 2.933228E+00 3.054020E+00 3.154432E+00 3.233560E+00 3.290650E+00 3.325138E+00 3.336673E+00 3.325138E+00 3.290650E+00 3.233560E+00 3.154432E+00 3.054020E+00 2.933228E+00 2.793080E+00 2.634679E+00 2.459174E+00 2.267728E+00 2.061492E+00 1.841588E+00 1.609087E+00 1.365007E+00 1.110300E+00 8.458526E-01 5.724826E-01 2.909435E-01 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.733338E-02 2.909435E-01 5.561688E-01 8.122841E-01 1.058507E+00 1.294000E+00 1.517869E+00 1.729173E+00 1.926925E+00 2.110108E+00 2.277686E+00 2.428624E+00 2.561911E+00 2.676584E+00 2.771759E+00 2.846658E+00 2.900641E+00 2.933228E+00 2.944124E+00 2.933228E+00 2.900641E+00 2.846658E+00 2.771759E+00 2.676584E+00 2.561911E+00 2.428624E+00 2.277686E+00 2.110108E+00 1.926925E+00 1.729173E+00 1.517869E+00 1.294000E+00 1.058507E+00 8.122841E-01 5.561688E-01 2.909435E-01 1.733338E-02 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 2.592852E-01 5.073954E-01 7.455119E-01 9.728448E-01 1.188560E+00 1.391786E+00 1.581618E+00 1.757133E+00 1.917398E+00 2.061492E+00 2.188521E+00 2.297640E+00 2.388082E+00 2.459174E+00 2.510366E+00 2.541251E+00 2.551574E+00 2.541251E+00 2.510366E+00 2.459174E+00 2.388082E+00 2.297640E+00 2.188521E+00 2.061492E+00 1.917398E+00 1.757133E+00 1.581618E+00 1.391786E+00 1.188560E+00 9.728448E-01 7.455119E-01 5.073954E-01 2.592852E-01 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.962749E-01 4.266580E-01 6.462466E-01 8.542639E-01 1.049904E+00 1.232340E+00 1.400731E+00 1.554238E+00 1.692039E+00 1.813339E+00 1.917398E+00 2.003544E+00 2.071192E+00 2.119867E+00 2.149216E+00 2.159024E+00 2.149216E+00 2.119867E+00 2.071192E+00 2.003544E+00 1.917398E+00 1.813339E+00 1.692039E+00 1.554238E+00 1.400731E+00 1.232340E+00 1.049904E+00 8.542639E-01 6.462466E-01 4.266580E-01 1.962749E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.025109E-01 3.147551E-01 5.155069E-01 7.040227E-01 8.795444E-01 1.041309E+00 1.188560E+00 1.320560E+00 1.436603E+00 1.536034E+00 1.618264E+00 1.682781E+00 1.729173E+00 1.757133E+00 1.766474E+00 1.757133E+00 1.729173E+00 1.682781E+00 1.618264E+00 1.536034E+00 1.436603E+00 1.320560E+00 1.188560E+00 1.041309E+00 8.795444E-01 7.040227E-01 5.155069E-01 3.147551E-01 1.025109E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.727501E-01 3.545715E-01 5.236254E-01 6.792177E-01 8.206647E-01 9.473040E-01 1.058507E+00 1.153693E+00 1.232340E+00 1.294000E+00 1.338311E+00 1.365007E+00 1.373924E+00 1.365007E+00 1.338311E+00 1.294000E+00 1.232340E+00 1.153693E+00 1.058507E+00 9.473040E-01 8.206647E-01 6.792177E-01 5.236254E-01 3.545715E-01 1.727501E-01 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 1.649210E-01 3.147551E-01 4.508076E-01 5.724826E-01 6.792177E-01 7.704949E-01 8.458526E-01 9.048952E-01 9.473040E-01 9.728448E-01 9.813745E-01 9.728448E-01 9.473040E-01 9.048952E-01 8.458526E-01 7.704949E-01 6.792177E-01 5.724826E-01 4.508076E-01 3.147551E-01 1.649210E-01 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 7.920820E-02 1.962749E-01 2.988742E-01 3.865433E-01 4.588710E-01 5.155069E-01 5.561688E-01 5.806502E-01 5.888247E-01 5.806502E-01 5.561688E-01 5.155069E-01 4.588710E-01 3.865433E-01 2.988742E-01 1.962749E-01 7.920820E-02 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+ 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 1.924449E-03 7.145278E-02 1.258685E-01 1.649210E-01 1.884270E-01 1.962749E-01 1.884270E-01 1.649210E-01 1.258685E-01 7.145278E-02 1.924449E-03 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
+
diff --git a/autotest/data/ex-gwf-bump/results.hds.cmp b/autotest/data/ex-gwf-bump/results.hds.cmp
new file mode 100644
index 00000000000..ec8e622f7ba
Binary files /dev/null and b/autotest/data/ex-gwf-bump/results.hds.cmp differ
diff --git a/autotest/data/prudic2004test2/sfr-packagedata.dat b/autotest/data/prudic2004test2/sfr-packagedata.dat
index 7c6623704c5..ca8dff8916a 100644
--- a/autotest/data/prudic2004test2/sfr-packagedata.dat
+++ b/autotest/data/prudic2004test2/sfr-packagedata.dat
@@ -1,39 +1,39 @@
-#rno rlen rwid rgrd rtp rbth rhk man ncon ustrf ndv
- 0 400.0000 5.000000 0.1818182E-02 48.63636 1.000000 100.0000 0.3000000E-01 1 1.0000 0
- 1 200.0000 5.000000 0.1818182E-02 48.09091 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 2 400.0000 5.000000 0.1818182E-02 47.54546 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 3 400.0000 5.000000 0.1818182E-02 46.81818 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 4 400.0000 5.000000 0.1818182E-02 46.09091 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 5 400.0000 5.000000 0.1818182E-02 45.36364 1.000000 100.0000 0.3000000E-01 1 1.0000 0
- 6 400.0000 5.000000 0.2187500E-02 44.06250 1.000000 100.0000 0.3000000E-01 1 1.0000 0
- 7 400.0000 5.000000 0.2187500E-02 43.18750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 8 400.0000 5.000000 0.2187500E-02 42.31250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 9 400.0000 5.000000 0.2187500E-02 41.43750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 10 400.0000 5.000000 0.2187500E-02 40.56250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 11 400.0000 5.000000 0.2187500E-02 39.68750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 12 400.0000 5.000000 0.2187500E-02 38.81250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 13 400.0000 5.000000 0.2187500E-02 37.93750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 14 400.0000 5.000000 0.2187500E-02 37.06250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 15 400.0000 5.000000 0.2187500E-02 36.18750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 16 400.0000 5.000000 0.2187500E-02 35.31250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 17 400.0000 5.000000 0.2187500E-02 34.43750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 18 400.0000 5.000000 0.1704545E-02 41.15909 1.000000 100.0000 0.3000000E-01 1 1.0000 0
- 19 200.0000 5.000000 0.1704545E-02 40.64773 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 20 200.0000 5.000000 0.1704545E-02 40.30682 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 21 400.0000 5.000000 0.1704545E-02 39.79546 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 22 400.0000 5.000000 0.1704545E-02 39.11364 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 23 400.0000 5.000000 0.1704545E-02 38.43182 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 24 400.0000 5.000000 0.1704545E-02 37.75000 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 25 200.0000 5.000000 0.1704545E-02 37.23864 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 26 200.0000 5.000000 0.1704545E-02 36.89773 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 27 400.0000 5.000000 0.1704545E-02 36.38636 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 28 400.0000 5.000000 0.1704545E-02 35.70454 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 29 400.0000 5.000000 0.1704545E-02 35.02273 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 30 400.0000 5.000000 0.1704545E-02 34.34091 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 31 400.0000 5.000000 0.2428571E-02 33.51429 1.000000 100.0000 0.3000000E-01 3 1.0000 0
- 32 400.0000 5.000000 0.2428571E-02 32.54286 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 33 400.0000 5.000000 0.2428571E-02 31.57143 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 34 400.0000 5.000000 0.2428571E-02 30.60000 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 35 400.0000 5.000000 0.2428571E-02 29.62857 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 36 400.0000 5.000000 0.2428571E-02 28.65714 1.000000 100.0000 0.3000000E-01 2 1.0000 0
- 37 400.0000 5.000000 0.2428571E-02 27.68571 1.000000 100.0000 0.3000000E-01 1 1.0000 0
+#ifno rlen rwid rgrd rtp rbth rhk man ncon ustrf ndv
+ 0 400.0000 5.000000 0.1818182E-02 48.63636 1.000000 100.0000 0.3000000E-01 1 1.0000 0
+ 1 200.0000 5.000000 0.1818182E-02 48.09091 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 2 400.0000 5.000000 0.1818182E-02 47.54546 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 3 400.0000 5.000000 0.1818182E-02 46.81818 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 4 400.0000 5.000000 0.1818182E-02 46.09091 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 5 400.0000 5.000000 0.1818182E-02 45.36364 1.000000 100.0000 0.3000000E-01 1 1.0000 0
+ 6 400.0000 5.000000 0.2187500E-02 44.06250 1.000000 100.0000 0.3000000E-01 1 1.0000 0
+ 7 400.0000 5.000000 0.2187500E-02 43.18750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 8 400.0000 5.000000 0.2187500E-02 42.31250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 9 400.0000 5.000000 0.2187500E-02 41.43750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 10 400.0000 5.000000 0.2187500E-02 40.56250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 11 400.0000 5.000000 0.2187500E-02 39.68750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 12 400.0000 5.000000 0.2187500E-02 38.81250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 13 400.0000 5.000000 0.2187500E-02 37.93750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 14 400.0000 5.000000 0.2187500E-02 37.06250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 15 400.0000 5.000000 0.2187500E-02 36.18750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 16 400.0000 5.000000 0.2187500E-02 35.31250 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 17 400.0000 5.000000 0.2187500E-02 34.43750 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 18 400.0000 5.000000 0.1704545E-02 41.15909 1.000000 100.0000 0.3000000E-01 1 1.0000 0
+ 19 200.0000 5.000000 0.1704545E-02 40.64773 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 20 200.0000 5.000000 0.1704545E-02 40.30682 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 21 400.0000 5.000000 0.1704545E-02 39.79546 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 22 400.0000 5.000000 0.1704545E-02 39.11364 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 23 400.0000 5.000000 0.1704545E-02 38.43182 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 24 400.0000 5.000000 0.1704545E-02 37.75000 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 25 200.0000 5.000000 0.1704545E-02 37.23864 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 26 200.0000 5.000000 0.1704545E-02 36.89773 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 27 400.0000 5.000000 0.1704545E-02 36.38636 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 28 400.0000 5.000000 0.1704545E-02 35.70454 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 29 400.0000 5.000000 0.1704545E-02 35.02273 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 30 400.0000 5.000000 0.1704545E-02 34.34091 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 31 400.0000 5.000000 0.2428571E-02 33.51429 1.000000 100.0000 0.3000000E-01 3 1.0000 0
+ 32 400.0000 5.000000 0.2428571E-02 32.54286 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 33 400.0000 5.000000 0.2428571E-02 31.57143 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 34 400.0000 5.000000 0.2428571E-02 30.60000 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 35 400.0000 5.000000 0.2428571E-02 29.62857 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 36 400.0000 5.000000 0.2428571E-02 28.65714 1.000000 100.0000 0.3000000E-01 2 1.0000 0
+ 37 400.0000 5.000000 0.2428571E-02 27.68571 1.000000 100.0000 0.3000000E-01 1 1.0000 0
diff --git a/autotest/framework.py b/autotest/framework.py
index 6e4e89c0832..8bfd0855169 100644
--- a/autotest/framework.py
+++ b/autotest/framework.py
@@ -1,53 +1,798 @@
+import os
+import shutil
+import time
+from itertools import repeat
+from pathlib import Path
+from subprocess import PIPE, STDOUT, Popen
+from traceback import format_exc
+from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
+from warnings import warn
+
import flopy
+import numpy as np
+from common_regression import (
+ COMPARE_PROGRAMS,
+ adjust_htol,
+ get_mf6_comparison,
+ get_mf6_files,
+ get_namefiles,
+ get_rclose,
+ get_regression_files,
+ setup_mf6,
+ setup_mf6_comparison,
+)
+from flopy.mbase import BaseModel
+from flopy.mf6 import MFSimulation
+from flopy.utils.compare import compare_heads
+from modflow_devtools.misc import get_ostag, is_in_ci
+
+DNODATA = 3.0e30
+EXTTEXT = {
+ "hds": "head",
+ "hed": "head",
+ "bhd": "head",
+ "ucn": "concentration",
+ "cbc": "cell-by-cell",
+}
+HDS_EXT = (
+ "hds",
+ "hed",
+ "bhd",
+ "ahd",
+ "bin",
+)
+CBC_EXT = (
+ "cbc",
+ "bud",
+)
+
+
+def api_return(success, model_ws) -> Tuple[bool, List[str]]:
+ """
+ parse libmf6 stdout shared object file
+ """
+ fpth = os.path.join(model_ws, "mfsim.stdout")
+ return success, open(fpth).readlines()
+
+
+def get_mfsim_lst_tail(path: os.PathLike, lines=100) -> str:
+ """Get the tail of the mfsim.lst listing file"""
+ msg = ""
+ _lines = open(path).read().splitlines()
+ msg = "\n" + 79 * "-" + "\n"
+ i0 = -lines if len(_lines) > lines else 0
+ for line in _lines[i0:]:
+ if len(line) > 0:
+ msg += f"{line}\n"
+ msg += 79 * "-" + "\n\n"
+ return msg
+
+
+def get_workspace(sim_or_model) -> Path:
+ if isinstance(sim_or_model, MFSimulation):
+ return sim_or_model.sim_path
+ elif isinstance(sim_or_model, BaseModel):
+ return Path(sim_or_model.model_ws)
+ else:
+ raise ValueError(f"Unsupported model type: {type(sim_or_model)}")
+
+
+def run_parallel(workspace, target, ncpus) -> Tuple[bool, List[str]]:
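+    # on local macOS runs, write a hostfile with explicit slots so mpiexec can
+    # oversubscribe; in CI and on other platforms pass --oversubscribe directly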
+ if not is_in_ci() and get_ostag() in ["mac"]:
+ oversubscribed = ["--hostfile", "localhost"]
+ with open(f"{workspace}/localhost", "w") as f:
+ f.write(f"localhost slots={ncpus}\n")
+ else:
+ oversubscribed = ["--oversubscribe"]
+
+ normal_msg = "normal termination"
+ success = False
+ nr_success = 0
+ buff = []
+
+ # parallel commands
+ mpiexec_cmd = (
+ ["mpiexec"] + oversubscribed + ["-np", str(ncpus), target, "-p"]
+ )
+
+ proc = Popen(mpiexec_cmd, stdout=PIPE, stderr=STDOUT, cwd=workspace)
+
+ while True:
+ line = proc.stdout.readline().decode("utf-8")
+ if line == "" and proc.poll() is not None:
+ break
+ if line:
+ # success is when the success message appears
+ # in every process of the parallel simulation
+ if normal_msg in line.lower():
+ nr_success += 1
+ if nr_success == ncpus:
+ success = True
+ line = line.rstrip("\r\n")
+ print(line)
+ buff.append(line)
+ else:
+ break
+
+ return success, buff
+
+
+def write_input(*sims, overwrite: bool = True, verbose: bool = True):
+ """
+ Write input files for `flopy.mf6.MFSimulation` or `flopy.mbase.BaseModel`.
+
+ Parameters
+ ----------
+
+ sims : arbitrary list
+ Simulations or models
+ verbose : bool, optional
+ whether to show verbose output
+ """
+
+ if sims is None:
+ warn("No simulations or models!")
+ return
+
+ # write input files for each model or simulation
+ for sim in sims:
+ if sim is None:
+ continue
+
+ if isinstance(sim, flopy.mf6.MFSimulation):
+ workspace = Path(sim.sim_path)
+ if any(workspace.glob("*")) and not overwrite:
+ warn(f"Workspace is not empty, not writing input files")
+ return
+ if verbose:
+ print(
+ f"Writing mf6 simulation '{sim.name}' to: {sim.sim_path}"
+ )
+ sim.write_simulation()
+ elif isinstance(sim, flopy.mbase.BaseModel):
+ workspace = Path(sim.model_ws)
+ if any(workspace.glob("*")) and not overwrite:
+ warn(f"Workspace is not empty, not writing input files")
+ return
+ if verbose:
+ print(
+ f"Writing {type(sim)} model '{sim.name}' to: {sim.model_ws}"
+ )
+ sim.write_input()
+ else:
+ raise ValueError(f"Unsupported simulation/model type: {type(sim)}")
class TestFramework:
- # tell pytest this isn't a test class, don't collect it
+ """
+ Defines a MODFLOW 6 test and its lifecycle, with configurable
+ hooks to evaluate results or run other models for comparison:
+
+ - MODFLOW 6 (directly or via API)
+ - MODFLOW-2005
+ - MODFLOW-NWT
+ - MODFLOW-USG
+ - MODFLOW-LGR
+
+ Parameters
+ ----------
+ name : str
+ The test name
+ workspace : pathlike
+ The test workspace
+ targets : dict
+ Binary targets to test against. Development binaries are
+        required, downloaded/rebuilt binaries are optional (if not
+ found, comparisons and regression tests will be skipped).
+ Dictionary maps target names to paths. The test framework
+ will refuse to run a program if it is not a known target.
+ build : function, optional
+ User defined function returning one or more simulations/models.
+ Takes `self` as input. This is the place to build simulations.
+ If no build function is provided, input files must be written
+ to the test `workspace` prior to calling `run()`.
+ check : function, optional
+ User defined function to evaluate results of the simulation.
+ Takes `self` as input. This is a good place for assertions.
+ parallel : bool, optional
+ Whether to test mf6 parallel capabilities.
+ ncpus : int, optional
+ Number of CPUs for mf6 parallel testing.
+ htol : float, optional
+ Tolerance for result comparisons.
+    rclose : float, optional
+        Residual tolerance for budget comparisons
+    verbose : bool, optional
+        Whether to show verbose output
+    xfail : bool, optional
+        Whether the test is expected to fail
+    api_func : function, optional
+        User defined function invoking the MODFLOW API, accepting
+        the MF6 library path and the test workspace as parameters
+    compare : str, optional
+        String selecting the comparison executable. Must be a key
+        into the `targets` dictionary, i.e. the name of a program
+        to use for the comparison model. Acceptable values: auto,
+        mf6, mf6_regression, libmf6, mf2005, mfnwt, mflgr, mfusg.
+ If 'auto', the program to use is determined automatically
+ by contents of the comparison model/simulation workspace.
+ """
+
+ # tell pytest this class doesn't contain tests, don't collect it
__test__ = False
- def build(self, build_function, idx, exdir):
+ def __init__(
+ self,
+ name: str,
+ workspace: Union[str, os.PathLike],
+ targets: Dict[str, Path],
+ api_func: Optional[Callable] = None,
+ build: Optional[Callable] = None,
+ check: Optional[Callable] = None,
+ compare: Optional[str] = "auto",
+ parallel=False,
+ ncpus=1,
+ htol=None,
+ rclose=None,
+ overwrite=True,
+ verbose=False,
+ xfail=False,
+ ):
+ # make sure workspace exists
+ workspace = Path(workspace).expanduser().absolute()
+ assert workspace.is_dir(), f"{workspace} is not a valid directory"
+ if verbose:
+ print("Initializing test", name, "in workspace", workspace)
+
+ self.name = name
+ self.workspace = workspace
+ self.targets = targets
+ self.build = build
+ self.check = check
+ self.parallel = parallel
+ self.ncpus = [ncpus] if isinstance(ncpus, int) else ncpus
+ self.api_func = api_func
+ self.compare = compare
+ self.outp = None
+ self.htol = 0.001 if htol is None else htol
+ self.rclose = 0.001 if rclose is None else rclose
+ self.overwrite = overwrite
+ self.verbose = verbose
+ self.xfail = [xfail] if isinstance(xfail, bool) else xfail
+
+ def __repr__(self):
+ return self.name
+
+ # private
+
+ def _compare_heads(
+ self, cpth=None, extensions="hds", mf6=False, htol=0.001
+ ) -> bool:
+ if isinstance(extensions, str):
+ extensions = [extensions]
+
+ if cpth:
+ files1 = []
+ files2 = []
+ exfiles = []
+ for file1 in self.outp:
+ ext = os.path.splitext(file1)[1][1:]
+ if ext.lower() in extensions:
+ # simulation file
+ pth = os.path.join(self.workspace, file1)
+ files1.append(pth)
+
+ # look for an exclusion file
+ pth = os.path.join(self.workspace, file1 + ".ex")
+ exfiles.append(pth if os.path.isfile(pth) else None)
+
+ # look for a comparison file
+ coutp = None
+ if mf6:
+ _, coutp = get_mf6_files(cpth / "mfsim.nam")
+ if coutp is not None:
+ for file2 in coutp:
+ ext = os.path.splitext(file2)[1][1:]
+ if ext.lower() in extensions:
+ files2.append(os.path.join(cpth, file2))
+ else:
+ files2.append(None)
+
+ if self.cmp_namefile is None:
+ pth = None
+ else:
+ pth = os.path.join(cpth, self.cmp_namefile)
+
+ for i in range(len(files1)):
+ file1 = files1[i]
+ ext = os.path.splitext(file1)[1][1:].lower()
+ outfile = os.path.splitext(os.path.basename(file1))[0]
+ outfile = os.path.join(
+ self.workspace, outfile + "." + ext + ".cmp.out"
+ )
+ file2 = None if files2 is None else files2[i]
+
+ # set exfile
+ exfile = None
+ if file2 is None:
+ if len(exfiles) > 0:
+ exfile = exfiles[i]
+ if exfile is not None:
+ print(
+ f"Exclusion file {i + 1}",
+ os.path.basename(exfile),
+ )
+
+ # make comparison
+ success = compare_heads(
+ None,
+ pth,
+ precision="double",
+ text=EXTTEXT[ext],
+ outfile=outfile,
+ files1=file1,
+ files2=file2,
+ htol=htol,
+ difftol=True,
+ verbose=self.verbose,
+ exfile=exfile,
+ )
+ print(f"{EXTTEXT[ext]} comparison {i + 1}", self.name)
+ if not success:
+ return False
+ return True
+
+ # otherwise it's a regression comparison
+ files0, files1 = get_regression_files(self.workspace, extensions)
+ extension = "hds"
+ for i, (fpth0, fpth1) in enumerate(zip(files0, files1)):
+ outfile = os.path.splitext(os.path.basename(fpth0))[0]
+ outfile = os.path.join(
+ self.workspace, outfile + f".{extension}.cmp.out"
+ )
+ success = compare_heads(
+ None,
+ None,
+ precision="double",
+ htol=htol,
+ text=EXTTEXT[extension],
+ outfile=outfile,
+ files1=fpth0,
+ files2=fpth1,
+ verbose=self.verbose,
+ )
+            print(
+                (
+                    f"{EXTTEXT[extension]} comparison {i + 1} "
+                    + f"{self.name} ({os.path.basename(fpth0)})"
+                )
+            )
+ if not success:
+ return False
+ return True
+
+ def _compare_concentrations(self, extensions="ucn", htol=0.001) -> bool:
+ if isinstance(extensions, str):
+ extensions = [extensions]
+
+ files0, files1 = get_regression_files(self.workspace, extensions)
+ extension = "ucn"
+ for i, (fpth0, fpth1) in enumerate(zip(files0, files1)):
+ outfile = os.path.splitext(os.path.basename(fpth0))[0]
+ outfile = os.path.join(
+ self.workspace, outfile + f".{extension}.cmp.out"
+ )
+ success = compare_heads(
+ None,
+ None,
+ precision="double",
+ htol=htol,
+ text=EXTTEXT[extension],
+ outfile=outfile,
+ files1=fpth0,
+ files2=fpth1,
+ verbose=self.verbose,
+ )
+            print(
+                (
+                    f"{EXTTEXT[extension]} comparison {i + 1} "
+                    + f"{self.name} ({os.path.basename(fpth0)})"
+                )
+            )
+ if not success:
+ return False
+ return True
+
+ def _compare_budgets(self, extensions="cbc", rclose=0.001) -> bool:
+ if isinstance(extensions, str):
+ extensions = [extensions]
+ files0, files1 = get_regression_files(self.workspace, extensions)
+ extension = "cbc"
+ for i, (fpth0, fpth1) in enumerate(zip(files0, files1)):
+ print(
+ f"{EXTTEXT[extension]} comparison {i + 1}",
+ f"{self.name} ({os.path.basename(fpth0)})",
+ )
+ success = self._compare_budget_files(
+ extension, fpth0, fpth1, rclose
+ )
+ if not success:
+ return False
+ return True
+
+ def _compare_budget_files(
+ self, extension, fpth0, fpth1, rclose=0.001
+ ) -> bool:
+ success = True
+        if os.stat(fpth0).st_size * os.stat(fpth1).st_size == 0:
+            return success
+ outfile = os.path.splitext(os.path.basename(fpth0))[0]
+ outfile = os.path.join(
+ self.workspace, outfile + f".{extension}.cmp.out"
+ )
+ fcmp = open(outfile, "w")
+ fcmp.write("Performing CELL-BY-CELL to CELL-BY-CELL comparison\n")
+ fcmp.write(f"{fpth0}\n")
+ fcmp.write(f"{fpth1}\n\n")
+
+ # open the files
+ cbc0 = flopy.utils.CellBudgetFile(
+ fpth0, precision="double", verbose=self.verbose
+ )
+ cbc1 = flopy.utils.CellBudgetFile(
+ fpth1, precision="double", verbose=self.verbose
+ )
+
+ # build list of cbc data to retrieve
+ avail0 = cbc0.get_unique_record_names()
+ avail1 = cbc1.get_unique_record_names()
+ avail0 = [t.decode().strip() for t in avail0]
+ avail1 = [t.decode().strip() for t in avail1]
+
+        # build lists of matching budget term keys to compare from each file
+ cbc_keys0 = []
+ cbc_keys1 = []
+ for t in avail0:
+ t1 = t
+ if t not in avail1:
+ # check if RCHA or EVTA is available and use that instead
+ # should be able to remove this once v6.3.0 is released
+ if t[:-1] in avail1:
+ t1 = t[:-1]
+ else:
+ raise Exception(f"Could not find {t} in {fpth1}")
+ cbc_keys0.append(t)
+ cbc_keys1.append(t1)
+
+ # get list of times and kstpkper
+ kk = cbc0.get_kstpkper()
+ times = cbc0.get_times()
+
+ # process data
+ for key, key1 in zip(cbc_keys0, cbc_keys1):
+ for idx, (k, t) in enumerate(zip(kk, times)):
+ v0 = cbc0.get_data(kstpkper=k, text=key)[0]
+ v1 = cbc1.get_data(kstpkper=k, text=key1)[0]
+ if v0.dtype.names is not None:
+ v0 = v0["q"]
+ v1 = v1["q"]
+ # skip empty vectors
+ if v0.size < 1:
+ continue
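+                    # compare against a floor tolerance of max(rclose, 1e-6);
+                    # differences greater than 5x that floor are flagged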
+ vmin = rclose
+ if vmin < 1e-6:
+ vmin = 1e-6
+ vmin_tol = 5.0 * vmin
+ if v0.shape != v1.shape:
+ v0 = v0.flatten()
+ v1 = v1.flatten()
+ idx = (abs(v0) > vmin) & (abs(v1) > vmin)
+ diff = np.zeros(v0.shape, dtype=v0.dtype)
+ diff[idx] = abs(v0[idx] - v1[idx])
+ diffmax = diff.max()
+ indices = np.where(diff == diffmax)[0]
+ if diffmax > vmin_tol:
+ success = False
+ msg = (
+ f"{os.path.basename(fpth0)} - "
+ + f"{key:16s} "
+ + f"difference ({diffmax:10.4g}) "
+ + f"> {vmin_tol:10.4g} "
+ + f"at {indices.size} nodes "
+ + f" [first location ({indices[0] + 1})] "
+ + f"at time {t} "
+ )
+ fcmp.write(f"{msg}\n")
+ if self.verbose:
+ print(msg)
+
+ fcmp.close()
+ return success
+
+ # public
+
+ def setup(self, src, dst):
+ print("Setting up MF6 test", self.name)
+ print(" Source:", src)
+ print(" Destination:", dst)
+ self.workspace = dst
+
+ # setup workspace and expected output files
+ _, self.outp = setup_mf6(src=src, dst=dst)
+ print("waiting...")
+ time.sleep(0.5)
+
+ if self.compare == "mf6_regression":
+ shutil.copytree(self.workspace, self.workspace / self.compare)
+ else:
+ self.compare = get_mf6_comparison(src) # detect comparison
+ setup_mf6_comparison(src, dst, self.compare, overwrite=True)
+
+ def run_sim_or_model(
+ self,
+ workspace: Union[str, os.PathLike],
+ target: Union[str, os.PathLike],
+ xfail: bool = False,
+ ncpus: int = 1,
+ ) -> Tuple[bool, List[str]]:
"""
- Build base and regression MODFLOW 6 models
-
- Parameters
- ----------
- build_function : function
- user defined function that builds a base model and optionally
- builds a regression model. If a regression model is not built
- then None must be returned from the function for the regression
- model.
- idx : int
- counter that corresponds to exdir entry
- exdir : str
- path to regression model files
+ Run a simulation or model with FloPy.
+
+ workspace : str or path-like
+ The simulation or model workspace
+ target : str or path-like
+ The target executable to use
+ xfail : bool
+ Whether to expect failure
+ ncpus : int
+ The number of CPUs for a parallel run
"""
- base, regression = build_function(idx, exdir)
- base.write_simulation()
- if regression is not None:
- if isinstance(regression, flopy.mf6.MFSimulation):
- regression.write_simulation()
+
+ # make sure workspace exists
+ workspace = Path(workspace).expanduser().absolute()
+ assert workspace.is_dir(), f"Workspace not found: {workspace}"
+
+ # make sure executable exists and framework knows about it
+        tgt = shutil.which(target)
+        assert tgt, f"Target executable not found: {target}"
+        assert (
+            Path(tgt) in self.targets.values()
+        ), f"Target executable is not registered with the test framework: {target}"
+
+ if self.verbose:
+ print(f"Running {target} in {workspace}")
+
+ # needed in _compare_heads()... todo: inject explicitly?
+ nf = next(iter(get_namefiles(workspace)), None)
+ self.cmp_namefile = (
+ None
+ if "mf6" in target.name or "libmf6" in target.name
+ else os.path.basename(nf)
+ if nf
+ else None
+ )
+
+ # run the model
+ try:
+ # via MODFLOW API
+ if "libmf6" in target.name and self.api_func:
+ success, buff = self.api_func(target, workspace)
+ # via MF6 executable
+ elif "mf6" in target.name:
+ # parallel test if configured
+ if self.parallel and ncpus > 1:
+ print(
+ f"Parallel test {self.name} on {self.ncpus} processes"
+ )
+ try:
+ success, buff = run_parallel(workspace, target, ncpus)
+ except Exception:
+                        warn(
+                            "MODFLOW 6 parallel test "
+                            f"{self.name} "
+                            f"failed with error:\n{format_exc()}"
+                        )
+ success = False
+ else:
+ # otherwise serial run
+ try:
+ success, buff = flopy.run_model(
+ target,
+ self.workspace / "mfsim.nam",
+ model_ws=workspace,
+ report=True,
+ )
+ except Exception:
+                        warn(
+                            "MODFLOW 6 serial test "
+                            f"{self.name} "
+                            f"failed with error:\n{format_exc()}"
+                        )
+ success = False
else:
- regression.write_input()
+ # non-MF6 model
+ try:
+ success, buff = flopy.run_model(
+ target, self.cmp_namefile, workspace, report=True
+ )
+ except Exception:
+ warn(f"{target} model failed:\n{format_exc()}")
+ success = False
- def run(self, sim, workspace=None):
+ if xfail:
+ if success:
+ warn("MODFLOW 6 model should have failed!")
+ success = False
+ else:
+ success = True
+
+ lst_file_path = Path(workspace) / "mfsim.lst"
+ if (
+ "mf6" in target.name
+ and not success
+ and lst_file_path.is_file()
+ ):
+ warn(
+ "MODFLOW 6 listing file ended with: \n"
+ + get_mfsim_lst_tail(lst_file_path)
+ )
+ except Exception:
+ success = False
+ warn(
+ f"Unhandled error in comparison model {self.name}:\n{format_exc()}"
+ )
+
+ return success, buff
+
+ def compare_output(self, compare):
"""
- Run the MODFLOW 6 simulation and compare to existing head file or
- appropriate MODFLOW-2005, MODFLOW-NWT, MODFLOW-USG, or MODFLOW-LGR run.
-
- Parameters
- ----------
- sim : Simulation object
- MODFLOW 6 autotest simulation object that runs the base and
- regression models, compares the results, and tears down the
- test if successful.
- workspace : str
- The path to the workspace where the test is run.
+ Compare the main simulation's output with that of another simulation or model.
+
+ compare : str
+ The comparison executable name: mf6, mf6_regression, libmf6, mf2005,
+ mfnwt, mflgr, or mfusg.
+ """
+
+ if compare not in COMPARE_PROGRAMS:
+ raise ValueError(f"Unsupported comparison program: {compare}")
+
+ if self.verbose:
+ print("Comparison test", self.name)
+
+ # adjust htol if < IMS outer_dvclose, and rclose for budget comparisons
+ htol = adjust_htol(self.workspace, self.htol)
+ rclose = get_rclose(self.workspace)
+ cmp_path = self.workspace / compare
+ if "mf6_regression" in compare:
+ assert self._compare_heads(
+ extensions=HDS_EXT, htol=htol
+ ), "head comparison failed"
+ assert self._compare_budgets(
+ extensions=CBC_EXT, rclose=rclose
+ ), "budget comparison failed"
+ assert self._compare_concentrations(
+ htol=htol
+ ), "concentration comparison failed"
+ else:
+ assert self._compare_heads(
+ cpth=cmp_path,
+ extensions=HDS_EXT,
+ mf6="mf6" in compare,
+ htol=htol,
+ ), "head comparison failed"
+
+ def run(self):
+ """
+ Run the test case end-to-end.
+
"""
- sim.set_model(
- sim.name if workspace is None else workspace, testModel=False
+ # if build fn provided, build models/simulations and write input files
+ if self.build:
+ sims = self.build(self)
+ sims = sims if isinstance(sims, Iterable) else [sims]
+ sims = [sim for sim in sims if sim] # filter Nones
+ self.sims = sims
+ nsims = len(sims)
+ self.buffs = list(repeat(None, nsims))
+
+ assert len(self.xfail) in [
+ 1,
+ nsims,
+ ], f"Invalid xfail: expected a single boolean or one for each model"
+ if len(self.xfail) == 1 and nsims:
+ self.xfail = list(repeat(self.xfail[0], nsims))
+
+ assert len(self.ncpus) in [
+ 1,
+ nsims,
+ ], f"Invalid ncpus: expected a single integer or one for each model"
+ if len(self.ncpus) == 1 and nsims:
+ self.ncpus = list(repeat(self.ncpus[0], nsims))
+
+ write_input(*sims, overwrite=self.overwrite, verbose=self.verbose)
+ else:
+ self.sims = [MFSimulation.load(sim_ws=self.workspace)]
+ self.buffs = [None]
+ assert (
+ len(self.xfail) == 1
+ ), f"Invalid xfail: expected a single boolean"
+ assert (
+ len(self.ncpus) == 1
+ ), f"Invalid ncpus: expected a single integer"
+
+ # run models/simulations
+ for i, sim_or_model in enumerate(self.sims):
+ tgts = self.targets
+ workspace = get_workspace(sim_or_model)
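+            # resolve the executable: use the model's exe_name when it is a
+            # registered target, otherwise fall back to the development mf6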
+ exe_path = (
+ Path(sim_or_model.exe_name)
+ if sim_or_model.exe_name
+ else tgts["mf6"]
+ )
+ target = (
+ exe_path
+ if exe_path in tgts.values()
+ else tgts.get(exe_path.stem, tgts["mf6"])
+ )
+ xfail = self.xfail[i]
+ ncpus = self.ncpus[i]
+ success, buff = self.run_sim_or_model(
+ workspace, target, xfail, ncpus
+ )
+ self.buffs[i] = buff # store model output for assertions later
+ assert success, (
+ f"{'Simulation' if 'mf6' in str(target) else 'Model'} "
+ f"{'should have failed' if xfail else 'failed'}: {workspace}"
+ )
+
+ # get expected output files from main simulation
+ _, self.outp = get_mf6_files(
+ self.workspace / "mfsim.nam", self.verbose
)
- sim.run()
- sim.compare()
- if sim.exfunc is not None:
- sim.exfunc(sim)
+
+ # setup and run comparison model(s), if enabled
+ if self.compare:
+ # try to autodetect comparison type if enabled
+ if self.compare == "auto":
+ if self.verbose:
+ print("Auto-detecting comparison type")
+ self.compare = get_mf6_comparison(self.workspace)
+ if self.compare:
+ if self.verbose:
+ print(f"Using comparison type: {self.compare}")
+
+ # copy simulation files to comparison workspace if mf6 regression
+ if self.compare == "mf6_regression":
+ cmp_path = self.workspace / self.compare
+ if os.path.isdir(cmp_path):
+ if self.verbose:
+ print(f"Cleaning {cmp_path}")
+ shutil.rmtree(cmp_path)
+ if self.verbose:
+ print(
+ f"Copying simulation files from {self.workspace} to {cmp_path}"
+ )
+ shutil.copytree(self.workspace, cmp_path)
+
+ # run comparison simulation if libmf6 or mf6 regression
+ if self.compare in ["mf6_regression", "libmf6"]:
+ # todo: don't hardcode workspace or assume agreement with test case
+ # simulation workspace, set & access simulation workspaces directly
+ workspace = self.workspace / self.compare
+ success, _ = self.run_sim_or_model(
+ workspace,
+ self.targets.get(self.compare, self.targets["mf6"]),
+ )
+ assert success, f"Comparison model failed: {workspace}"
+
+ # compare model results, if enabled
+ if self.verbose:
+ print("Comparing outputs")
+ self.compare_output(self.compare)
+
+ # check results, if enabled
+ if self.check:
+ if self.verbose:
+ print("Checking outputs")
+ self.check(self)
diff --git a/autotest/get_exes.py b/autotest/get_exes.py
index 0a071d50fea..d5c789ceb11 100644
--- a/autotest/get_exes.py
+++ b/autotest/get_exes.py
@@ -1,15 +1,18 @@
import argparse
+from os import environ
from pathlib import Path
+from platform import system
from tempfile import TemporaryDirectory
from warnings import warn
import flopy
import pytest
-from conftest import project_root_path
from flaky import flaky
from modflow_devtools.build import meson_build
from modflow_devtools.download import download_and_unzip, get_release
-from modflow_devtools.misc import get_ostag
+from modflow_devtools.misc import get_ostag, is_in_ci, set_env
+
+from conftest import project_root_path
repository = "MODFLOW-USGS/modflow6"
top_bin_path = project_root_path / "bin"
@@ -73,11 +76,21 @@ def test_rebuild_release(rebuilt_bin_path: Path):
f.write(f"{line}\n")
# rebuild with Meson
- meson_build(
- project_path=source_files_path.parent,
- build_path=download_path / "builddir",
- bin_path=rebuilt_bin_path,
- )
+ def rebuild():
+ meson_build(
+ project_path=source_files_path.parent,
+ build_path=download_path / "builddir",
+ bin_path=rebuilt_bin_path,
+ )
+
+ # temp workaround until next release,
+ # ifx fails to build 6.4.2 on Windows
+ # most likely due to backspace issues
+ if system() == "Windows" and environ.get("FC") == "ifx":
+ with set_env(FC="ifort", CC="icl"):
+ rebuild()
+ else:
+ rebuild()
@flaky(max_runs=3)
diff --git a/autotest/meson.build b/autotest/meson.build
new file mode 100644
index 00000000000..a36d6e1e82e
--- /dev/null
+++ b/autotest/meson.build
@@ -0,0 +1,34 @@
+test_drive = dependency('test-drive', required : false)
+if test_drive.found() and not fc_id.contains('intel')
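+  # unit tests require the test-drive framework and are not built with Intel compilers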
+ tests = [
+ 'ArrayHandlers',
+ 'DevFeature',
+ 'GeomUtil',
+ 'HashTable',
+ 'InputOutput',
+ 'List',
+ 'MathUtil',
+ 'Message',
+ 'Sim'
+ ]
+
+ test_srcs = files(
+ 'tester.f90',
+ )
+ foreach t : tests
+ test_srcs += files('Test@0@.f90'.format(t.underscorify()))
+ endforeach
+
+ tester = executable(
+ 'tester',
+ sources: test_srcs,
+ link_with: mf6core,
+ dependencies: test_drive,
+ )
+
+ test('Test source modules', tester)
+
+ foreach t : tests
+ test(t, tester, args: t)
+ endforeach
+endif
\ No newline at end of file
diff --git a/autotest/pytest.ini b/autotest/pytest.ini
index bc9cdb0cc74..b8e144f9f5c 100644
--- a/autotest/pytest.ini
+++ b/autotest/pytest.ini
@@ -1,4 +1,5 @@
[pytest]
+addopts = --color=yes
python_files =
test_*.py
*_test*.py
diff --git a/autotest/simulation.py b/autotest/simulation.py
deleted file mode 100644
index 73516fbfdee..00000000000
--- a/autotest/simulation.py
+++ /dev/null
@@ -1,784 +0,0 @@
-import os
-import shutil
-import sys
-import time
-from traceback import format_exc
-from subprocess import PIPE, STDOUT, Popen
-
-import flopy
-import numpy as np
-from common_regression import (
- get_mf6_comparison,
- get_mf6_files,
- get_namefiles,
- setup_mf6,
- setup_mf6_comparison,
-)
-from flopy.utils.compare import compare_heads
-from modflow_devtools.misc import is_in_ci
-
-sfmt = "{:25s} - {}"
-extdict = {
- "hds": "head",
- "hed": "head",
- "bhd": "head",
- "ucn": "concentration",
- "cbc": "cell-by-cell",
-}
-
-
-class TestSimulation:
- # tell pytest this isn't a test class, don't collect it
- __test__ = False
-
- def __init__(
- self,
- name,
- parallel=False,
- ncpus=1,
- exfunc=None,
- exe_dict=None,
- htol=None,
- pdtol=None,
- rclose=None,
- idxsim=None,
- cmp_verbose=True,
- require_failure=None,
- api_func=None,
- mf6_regression=False,
- make_comparison=True,
- simpath=None,
- ):
- msg = sfmt.format("Initializing test", name)
- print(msg)
-
- self.name = name
- self.parallel = parallel
- self.ncpus = ncpus
- self.exfunc = exfunc
- self.targets = exe_dict
- self.simpath = simpath
- self.inpt = None
- self.outp = None
- self.coutp = None
- self.api_func = api_func
- self.mf6_regression = mf6_regression
- self.make_comparison = make_comparison
- self.action = None
-
- # set htol for comparisons
- if htol is None:
- htol = 0.001
- else:
- msg = sfmt.format("User specified comparison htol", htol)
- print(msg)
-
- self.htol = htol
-
- # set pdtol for comparisons
- if pdtol is None:
- pdtol = 0.001
- else:
- msg = sfmt.format(
- "User specified percent difference comparison pdtol", pdtol
- )
- print(msg)
-
- self.pdtol = pdtol
-
- # set rclose for comparisons
- if rclose is None:
- rclose = 0.001
- else:
- msg = sfmt.format(
- "User specified percent difference comparison rclose", rclose
- )
- print(msg)
-
- self.rclose = rclose
-
- # set index for multi-simulation comparisons
- self.idxsim = idxsim
-
- # set compare verbosity
- self.cmp_verbose = cmp_verbose
-
- # set allow failure
- self.require_failure = require_failure
-
- self.success = False
-
- # set is_ci
- self.is_CI = is_in_ci()
-
- return
-
- def __repr__(self):
- return self.name
-
- def set_model(self, pth, testModel=True):
- """
- Set paths to MODFLOW 6 model and associated comparison test
- """
- # make sure this is a valid path
- if not os.path.isdir(pth):
- assert False, f"{pth} is not a valid directory"
-
- self.simpath = pth
-
- # get MODFLOW 6 output file names
- fpth = os.path.join(pth, "mfsim.nam")
- mf6inp, mf6outp = get_mf6_files(fpth)
- self.outp = mf6outp
-
- # determine comparison model
- self.setup_comparison(pth, pth, testModel=testModel)
- # if self.mf6_regression:
- # self.action = "mf6_regression"
- # else:
- # self.action = get_mf6_comparison(pth)
- if self.action is not None:
- if "mf6" in self.action or "mf6_regression" in self.action:
- cinp, self.coutp = get_mf6_files(fpth)
-
- def setup(self, src, dst):
- msg = sfmt.format("Setting up test workspace", self.name)
- print(msg)
- self.originpath = src
- self.simpath = dst
- try:
- self.inpt, self.outp = setup_mf6(src=src, dst=dst)
- print("waiting...")
- time.sleep(0.5)
- success = True
- except:
- success = False
- print(f"source: {src}")
- print(f"destination: {dst}")
- assert success, f"Failed to set up test workspace: {format_exc()}"
-
- if success:
- self.setup_comparison(src, dst)
-
- return
-
- def setup_comparison(self, src, dst, testModel=True):
-
- # evaluate if comparison should be made
- if not self.make_comparison:
- return
-
- # adjust htol if it is smaller than IMS outer_dvclose
- dvclose = self._get_dvclose(dst)
- if dvclose is not None:
- dvclose *= 5.0
- if self.htol < dvclose:
- self.htol = dvclose
-
- # get rclose to use with budget comparisons
- rclose = self._get_rclose(dst)
- if rclose is None:
- rclose = 0.5
- else:
- rclose *= 5.0
- self.rclose = rclose
-
- # Copy comparison simulations if available
- if self.mf6_regression:
- action = "mf6_regression"
- pth = os.path.join(dst, action)
- if os.path.isdir(pth):
- shutil.rmtree(pth)
- shutil.copytree(dst, pth)
- elif testModel:
- action = setup_mf6_comparison(src, dst, remove_existing=True)
- else:
- action = get_mf6_comparison(dst)
-
- self.action = action
-
- return
-
- def run(self):
- """
- Run the model and assert if the model terminated successfully
- """
- msg = sfmt.format("Run test", self.name)
- print(msg)
-
- # Set nam as namefile name without path
- nam = None
-
- # run mf6 models
- exe = str(self.targets["mf6"].absolute())
- msg = sfmt.format("using executable", exe)
- print(msg)
-
- if self.parallel:
- print("running parallel on", self.ncpus, "processes")
- try:
- success, buff = self.run_parallel(
- exe,
- )
- except Exception as exc:
- msg = sfmt.format("MODFLOW 6 run", self.name)
- print(msg)
- print(exc)
- success = False
- else:
- try:
- success, buff = flopy.run_model(
- exe,
- nam,
- model_ws=self.simpath,
- silent=False,
- report=True,
- )
- msg = sfmt.format("MODFLOW 6 run", self.name)
- if success:
- print(msg)
- else:
- print(msg)
- except:
- msg = sfmt.format("MODFLOW 6 run", self.name)
- print(msg)
- success = False
-
- # set failure based on success and require_failure setting
- if self.require_failure is None:
- msg = "MODFLOW 6 model did not terminate normally"
- if success:
- failure = False
- else:
- failure = True
- else:
- if self.require_failure:
- msg = "MODFLOW 6 model should have failed"
- if not success:
- failure = False
- else:
- failure = True
- else:
- msg = "MODFLOW 6 model should not have failed"
- if success:
- failure = False
- else:
- failure = True
-
- # print end of mfsim.lst to the screen
- if failure and self.is_CI:
- fpth = os.path.join(self.simpath, "mfsim.lst")
- msg = self._get_mfsim_listing(fpth) + msg
-
- # test for failure
- assert not failure, msg
-
- self.nam_cmp = None
- if success:
- if self.action is not None:
- if self.action.lower() == "compare":
- msg = sfmt.format("Comparison files", self.name)
- print(msg)
- else:
- cpth = os.path.join(self.simpath, self.action)
- key = self.action.lower().replace(".cmp", "")
- exe = str(self.targets[key].absolute())
- msg = sfmt.format("comparison executable", exe)
- print(msg)
- if (
- "mf6" in key
- or "libmf6" in key
- or "mf6_regression" in key
- ):
- nam = None
- else:
- npth = get_namefiles(cpth)[0]
- nam = os.path.basename(npth)
- self.nam_cmp = nam
- try:
- if self.api_func is None:
- success_cmp, buff = flopy.run_model(
- exe,
- nam,
- model_ws=cpth,
- silent=False,
- report=True,
- )
- else:
- success_cmp, buff = self.api_func(
- exe, self.idxsim, model_ws=cpth
- )
- msg = sfmt.format(
- "Comparison run", self.name + "/" + key
- )
- print(msg)
-
- # print end of mfsim.lst to the screen
- if "mf6" in key:
- if not success and self.is_CI:
- fpth = os.path.join(cpth, "mfsim.lst")
- print(self._get_mfsim_listing(fpth))
-
- except:
- success_cmp = False
- msg = sfmt.format(
- "Comparison run", self.name + "/" + key
- )
- print(msg)
-
- assert success_cmp, "Unsuccessful comparison run"
-
- return
-
- def run_parallel(self, exe):
- normal_msg="normal termination"
- success = False
- nr_success = 0
- buff = []
-
- mpiexec_cmd = ["mpiexec", "--oversubscribe", "-np", str(self.ncpus), exe, "-p"]
- proc = Popen(mpiexec_cmd, stdout=PIPE, stderr=STDOUT, cwd=self.simpath)
-
- while True:
- line = proc.stdout.readline().decode("utf-8")
- if line == "" and proc.poll() is not None:
- break
- if line:
- # success is when the success message appears
- # in every process of the parallel simulation
- if normal_msg in line.lower():
- nr_success = nr_success + 1
- if nr_success == self.ncpus:
- success = True
- line = line.rstrip("\r\n")
- print(line)
- buff.append(line)
- else:
- break
-
- return success, buff
-
-
- def compare(self):
- """
- Compare the model results
-
- """
- self.success = True
-
- # evaluate if comparison should be made
- if not self.make_comparison:
- return
-
- msgall = ""
- msg = sfmt.format("Comparison test", self.name)
- print(msg)
-
- if self.action is not None:
- cpth = os.path.join(self.simpath, self.action)
- files_cmp = None
- if self.action.lower() == "compare":
- files_cmp = []
- files = os.listdir(cpth)
- for file in files:
- files_cmp.append(file)
- elif "mf6" in self.action:
- fpth = os.path.join(cpth, "mfsim.nam")
- cinp, self.coutp = get_mf6_files(fpth)
-
- head_extensions = (
- "hds",
- "hed",
- "bhd",
- "ahd",
- "bin",
- )
- if "mf6_regression" in self.action:
- success, msgall = self._compare_heads(
- msgall,
- extensions=head_extensions,
- )
- if not success:
- self.success = False
- # non-regression runs - for new features
- else:
- files1 = []
- files2 = []
- exfiles = []
- ipos = 0
- for file1 in self.outp:
- ext = os.path.splitext(file1)[1][1:]
-
- if ext.lower() in head_extensions:
-
- # simulation file
- pth = os.path.join(self.simpath, file1)
- files1.append(pth)
-
- # look for an exclusion file
- pth = os.path.join(self.simpath, file1 + ".ex")
- if os.path.isfile(pth):
- exfiles.append(pth)
- else:
- exfiles.append(None)
-
- # Check to see if there is a corresponding compare file
- if files_cmp is not None:
-
- if file1 + ".cmp" in files_cmp:
- # compare file
- idx = files_cmp.index(file1 + ".cmp")
- pth = os.path.join(cpth, files_cmp[idx])
- files2.append(pth)
- txt = sfmt.format(
- f"Comparison file {ipos + 1}",
- os.path.basename(pth),
- )
- print(txt)
- else:
- if self.coutp is not None:
- for file2 in self.coutp:
- ext = os.path.splitext(file2)[1][1:]
-
- if ext.lower() in head_extensions:
- # simulation file
- pth = os.path.join(cpth, file2)
- files2.append(pth)
-
- else:
- files2.append(None)
-
- if self.nam_cmp is None:
- pth = None
- else:
- pth = os.path.join(cpth, self.nam_cmp)
-
- for ipos in range(len(files1)):
- file1 = files1[ipos]
- ext = os.path.splitext(file1)[1][1:].lower()
- outfile = os.path.splitext(os.path.basename(file1))[0]
- outfile = os.path.join(
- self.simpath, outfile + "." + ext + ".cmp.out"
- )
- if files2 is None:
- file2 = None
- else:
- file2 = files2[ipos]
-
- # set exfile
- exfile = None
- if file2 is None:
- if len(exfiles) > 0:
- exfile = exfiles[ipos]
- if exfile is not None:
- txt = sfmt.format(
- f"Exclusion file {ipos + 1}",
- os.path.basename(exfile),
- )
- print(txt)
-
- # make comparison
- success_tst = compare_heads(
- None,
- pth,
- precision="double",
- text=extdict[ext],
- outfile=outfile,
- files1=file1,
- files2=file2,
- htol=self.htol,
- difftol=True,
- # Change to true to have list of all nodes exceeding htol
- verbose=self.cmp_verbose,
- exfile=exfile,
- )
- msg = sfmt.format(
- f"{extdict[ext]} comparison {ipos + 1}",
- self.name,
- )
- print(msg)
-
- if not success_tst:
- self.success = False
- msgall += msg + " ... FAILED\n"
-
- # compare concentrations
- if "mf6_regression" in self.action:
- success, msgall = self._compare_concentrations(msgall)
- if not success:
- self.success = False
-
- # compare cbc files
- if "mf6_regression" in self.action:
- cbc_extensions = (
- "cbc",
- "bud",
- )
- success, msgall = self._compare_budgets(
- msgall, extensions=cbc_extensions
- )
- if not success:
- self.success = False
-
- assert self.success, msgall
- return
-
- def _get_mfsim_listing(self, lst_pth):
- """Get the tail of the mfsim.lst listing file"""
- msg = ""
- ilen = 100
- with open(lst_pth) as fp:
- lines = fp.read().splitlines()
- msg = "\n" + 79 * "-" + "\n"
- if len(lines) > ilen:
- i0 = -100
- else:
- i0 = 0
- for line in lines[i0:]:
- if len(line) > 0:
- msg += f"{line}\n"
- msg += 79 * "-" + "\n\n"
- return msg
-
- def _get_dvclose(self, dir_pth):
- """Get outer_dvclose value from MODFLOW 6 ims file"""
- dvclose = None
- files = os.listdir(dir_pth)
- for file_name in files:
- pth = os.path.join(dir_pth, file_name)
- if os.path.isfile(pth):
- if file_name.lower().endswith(".ims"):
- with open(pth) as f:
- lines = f.read().splitlines()
- for line in lines:
- if "outer_dvclose" in line.lower():
- v = float(line.split()[1])
- if dvclose is None:
- dvclose = v
- else:
- if v > dvclose:
- dvclose = v
- break
-
- return dvclose
-
- def _get_rclose(self, dir_pth):
- """Get inner_rclose value from MODFLOW 6 ims file"""
- rclose = None
- files = os.listdir(dir_pth)
- for file_name in files:
- pth = os.path.join(dir_pth, file_name)
- if os.path.isfile(pth):
- if file_name.lower().endswith(".ims"):
- with open(pth) as f:
- lines = f.read().splitlines()
- for line in lines:
- if "inner_rclose" in line.lower():
- v = float(line.split()[1])
- if rclose is None:
- rclose = v
- else:
- if v > rclose:
- rclose = v
- break
-
- return rclose
-
- def _regression_files(self, extensions):
- if isinstance(extensions, str):
- extensions = [extensions]
- files = os.listdir(self.simpath)
- files0 = []
- files1 = []
- for file_name in files:
- fpth0 = os.path.join(self.simpath, file_name)
- if os.path.isfile(fpth0):
- for extension in extensions:
- if file_name.lower().endswith(extension):
- files0.append(fpth0)
- fpth1 = os.path.join(
- self.simpath, "mf6_regression", file_name
- )
- files1.append(fpth1)
- break
- return files0, files1
-
- def _compare_heads(self, msgall, extensions="hds"):
- if isinstance(extensions, str):
- extensions = [extensions]
- success = True
- files0, files1 = self._regression_files(extensions)
- extension = "hds"
- ipos = 0
- for idx, (fpth0, fpth1) in enumerate(zip(files0, files1)):
- outfile = os.path.splitext(os.path.basename(fpth0))[0]
- outfile = os.path.join(
- self.simpath, outfile + f".{extension}.cmp.out"
- )
- success_tst = compare_heads(
- None,
- None,
- precision="double",
- htol=self.htol,
- text=extdict[extension],
- outfile=outfile,
- files1=fpth0,
- files2=fpth1,
- verbose=self.cmp_verbose,
- )
- msg = sfmt.format(
- f"{extdict[extension]} comparison {ipos + 1}",
- f"{self.name} ({os.path.basename(fpth0)})",
- )
- ipos += 1
- print(msg)
-
- if not success_tst:
- success = False
- msgall += msg + " ... FAILED\n"
-
- return success, msgall
-
- def _compare_concentrations(self, msgall, extensions="ucn"):
- if isinstance(extensions, str):
- extensions = [extensions]
- success = True
- files0, files1 = self._regression_files(extensions)
- extension = "ucn"
- ipos = 0
- for idx, (fpth0, fpth1) in enumerate(zip(files0, files1)):
- outfile = os.path.splitext(os.path.basename(fpth0))[0]
- outfile = os.path.join(
- self.simpath, outfile + f".{extension}.cmp.out"
- )
- success_tst = compare_heads(
- None,
- None,
- precision="double",
- htol=self.htol,
- text=extdict[extension],
- outfile=outfile,
- files1=fpth0,
- files2=fpth1,
- verbose=self.cmp_verbose,
- )
- msg = sfmt.format(
- f"{extdict[extension]} comparison {ipos + 1}",
- f"{self.name} ({os.path.basename(fpth0)})",
- )
- ipos += 1
- print(msg)
-
- if not success_tst:
- success = False
- msgall += msg + " ... FAILED\n"
-
- return success, msgall
-
- def _compare_budgets(self, msgall, extensions="cbc"):
- if isinstance(extensions, str):
- extensions = [extensions]
- success = True
- files0, files1 = self._regression_files(extensions)
- extension = "cbc"
- ipos = 0
- for idx, (fpth0, fpth1) in enumerate(zip(files0, files1)):
- if os.stat(fpth0).st_size * os.stat(fpth0).st_size == 0:
- continue
- outfile = os.path.splitext(os.path.basename(fpth0))[0]
- outfile = os.path.join(
- self.simpath, outfile + f".{extension}.cmp.out"
- )
- fcmp = open(outfile, "w")
-
- # open the files
- cbc0 = flopy.utils.CellBudgetFile(
- fpth0, precision="double", verbose=self.cmp_verbose
- )
- cbc1 = flopy.utils.CellBudgetFile(
- fpth1, precision="double", verbose=self.cmp_verbose
- )
-
- # build list of cbc data to retrieve
- avail0 = cbc0.get_unique_record_names()
- avail1 = cbc1.get_unique_record_names()
- avail0 = [t.decode().strip() for t in avail0]
- avail1 = [t.decode().strip() for t in avail1]
-
-            # initialize list for storing totals for each budget term
- cbc_keys0 = []
- cbc_keys1 = []
- for t in avail0:
- t1 = t
- if t not in avail1:
- # check if RCHA or EVTA is available and use that instead
- # should be able to remove this once v6.3.0 is released
- if t[:-1] in avail1:
- t1 = t[:-1]
- else:
- raise Exception(f"Could not find {t} in {fpth1}")
- cbc_keys0.append(t)
- cbc_keys1.append(t1)
-
- # get list of times and kstpkper
- kk = cbc0.get_kstpkper()
- times = cbc0.get_times()
-
- # process data
- success_tst = True
- for key, key1 in zip(cbc_keys0, cbc_keys1):
- for idx, (k, t) in enumerate(zip(kk, times)):
- v0 = cbc0.get_data(kstpkper=k, text=key)[0]
- v1 = cbc1.get_data(kstpkper=k, text=key1)[0]
- if v0.dtype.names is not None:
- v0 = v0["q"]
- v1 = v1["q"]
- # skip empty vectors
- if v0.size < 1:
- continue
- vmin = self.rclose
- if vmin < 1e-6:
- vmin = 1e-6
- vmin_tol = 5.0 * vmin
- idx = (abs(v0) > vmin) & (abs(v1) > vmin)
- diff = np.zeros(v0.shape, dtype=v0.dtype)
- diff[idx] = abs(v0[idx] - v1[idx])
- diffmax = diff.max()
- indices = np.where(diff == diffmax)[0]
- if diffmax > vmin_tol:
- success_tst = False
- msg = (
- f"{os.path.basename(fpth0)} - "
- + f"{key:16s} "
- + f"difference ({diffmax:10.4g}) "
- + f"> {vmin_tol:10.4g} "
- + f"at {indices.size} nodes "
- + f" [first location ({indices[0] + 1})] "
- + f"at time {t} "
- )
- fcmp.write(f"{msg}\n")
- if self.cmp_verbose:
- print(msg)
-
- msg = sfmt.format(
- f"{extdict[extension]} comparison {ipos + 1}",
- f"{self.name} ({os.path.basename(fpth0)})",
- )
- ipos += 1
- print(msg)
-
- fcmp.close()
-
- if not success_tst:
- success = False
- msgall += msg + " ... FAILED\n"
-
- return success, msgall
-
-
-def api_return(success, model_ws):
- """
- parse libmf6 stdout shared object file
- """
- fpth = os.path.join(model_ws, "mfsim.stdout")
- return success, open(fpth).readlines()
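The deleted framework compared regression budget files term by term with an absolute tolerance tied to the solver's `inner_rclose`. A minimal sketch of that tolerance logic, using plain numpy arrays and illustrative names (not the framework's API):

```python
import numpy as np

def budget_terms_match(v0, v1, rclose, factor=5.0):
    """Return True if two budget-term arrays agree within the framework's tolerance.

    Values smaller than max(rclose, 1e-6) in either array are ignored; the
    remaining absolute differences must stay below factor * max(rclose, 1e-6).
    """
    vmin = max(rclose, 1e-6)
    vmin_tol = factor * vmin
    mask = (np.abs(v0) > vmin) & (np.abs(v1) > vmin)
    diff = np.zeros_like(v0)
    diff[mask] = np.abs(v0[mask] - v1[mask])
    return diff.max() <= vmin_tol if diff.size else True

# identical flow vectors trivially pass
q = np.array([1.0, -2.5, 0.0])
assert budget_terms_match(q, q.copy(), rclose=1e-6)
```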
diff --git a/autotest/test_cli.py b/autotest/test_cli.py
index aa9185ceb46..6cbf1bb2e0c 100644
--- a/autotest/test_cli.py
+++ b/autotest/test_cli.py
@@ -1,29 +1,21 @@
-import re
+import platform
import subprocess
from conftest import project_root_path
bin_path = project_root_path / "bin"
-
-
-def split_nonnumeric(s):
- match = re.compile("[^0-9]").search(s)
- return [s[:match.start()], s[match.start():]] if match else s
+ext = ".exe" if platform.system() == "Windows" else ""
+exe = f"mf6{ext}"
def test_cli_version():
output = " ".join(
- subprocess.check_output([str(bin_path / "mf6"), "-v"]).decode().split()
+ subprocess.check_output([str(bin_path / exe), "-v"]).decode().split()
)
print(output)
- assert output.startswith("mf6:")
-
- version = (
- output.lower().split(' ')[1]
- )
+ assert output.startswith(f"{exe}:"), f"found: {output}"
+ version = output.lower().split(" ")[1]
print(version)
v_split = version.split(".")
- assert len(v_split) == 3
- assert all(s.isdigit() for s in v_split[:2])
- sol = split_nonnumeric(v_split[2])
- assert sol[0].isdigit()
+ assert len(v_split) >= 2
+ assert all(s[-1].isdigit() for s in v_split[:2])
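The rewritten version check tolerates development builds whose last component is not purely numeric; only the first two components must end in a digit. A small illustration of what the relaxed assertions accept, assuming `-v` output of the form `mf6: <version>`:

```python
# hypothetical -v outputs the relaxed check should accept
for output in ("mf6: 6.4.2", "mf6: 6.5.0.dev0"):
    version = output.lower().split(" ")[1]
    v_split = version.split(".")
    assert len(v_split) >= 2
    assert all(s[-1].isdigit() for s in v_split[:2])
```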
diff --git a/autotest/test_examples.py b/autotest/test_examples.py
new file mode 100644
index 00000000000..5262f6ed7cf
--- /dev/null
+++ b/autotest/test_examples.py
@@ -0,0 +1,60 @@
+import pytest
+
+from framework import TestFramework
+
+# skip nested models
+# ex-gwf-csub-p02c has subdirs like 'es-001', 'hb-100'
+# all others just have 2 folders 'mf6gwf' and 'mf6gwt'
+excluded_models = [
+ "ex-gwf-csub-p02c",
+ "ex-gwt-hecht-mendez-b",
+ "ex-gwt-hecht-mendez-c",
+ "ex-gwt-keating",
+ "ex-gwt-moc3d-p01a",
+ "ex-gwt-moc3d-p01b",
+ "ex-gwt-moc3d-p01c",
+ "ex-gwt-moc3d-p01d",
+ "ex-gwt-moc3d-p02",
+ "ex-gwt-moc3d-p02tg",
+ "ex-gwt-mt3dms-p02a",
+ "ex-gwt-mt3dms-p02b",
+ "ex-gwt-mt3dms-p02c",
+ "ex-gwt-mt3dms-p02d",
+ "ex-gwt-mt3dms-p02e",
+ "ex-gwt-mt3dms-p02f",
+ "ex-gwt-mt3dsupp631",
+ "ex-gwt-mt3dsupp632a",
+ "ex-gwt-mt3dsupp632b",
+ "ex-gwt-mt3dsupp632c",
+ "ex-gwt-mt3dsupp82",
+ "ex-gwt-prudic2004t2",
+]
+
+
+@pytest.mark.large
+@pytest.mark.repo
+@pytest.mark.regression
+@pytest.mark.slow
+def test_scenario(
+ # https://modflow-devtools.readthedocs.io/en/latest/md/fixtures.html#example-scenarios
+ function_tmpdir,
+ example_scenario,
+ targets,
+):
+ name, namefiles = example_scenario
+ if name in excluded_models:
+ pytest.skip(f"Skipping: {name} (excluded)")
+
+ model_paths = [nf.parent for nf in namefiles]
+ for model_path in model_paths:
+ model_name = f"{name}_{model_path.name}"
+ workspace = function_tmpdir / model_name
+ test = TestFramework(
+ name=model_name,
+ workspace=model_path,
+ targets=targets,
+ compare="mf6_regression",
+ verbose=False,
+ )
+ test.setup(model_path, workspace)
+ test.run()
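Each example scenario supplies one `mfsim.nam` per sub-model; the test derives a separate workspace name from every namefile's parent directory (e.g. `mf6gwf`, `mf6gwt`). A small sketch of that naming with hypothetical paths:

```python
from pathlib import Path

# hypothetical scenario: one flow and one transport sub-model
name = "ex-gwt-example"
namefiles = [
    Path("examples/ex-gwt-example/mf6gwf/mfsim.nam"),
    Path("examples/ex-gwt-example/mf6gwt/mfsim.nam"),
]
model_paths = [nf.parent for nf in namefiles]
workspaces = [f"{name}_{p.name}" for p in model_paths]
assert workspaces == ["ex-gwt-example_mf6gwf", "ex-gwt-example_mf6gwt"]
```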
diff --git a/autotest/test_gwf.py b/autotest/test_gwf.py
deleted file mode 100644
index 59e0d18f6ac..00000000000
--- a/autotest/test_gwf.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from modflow_devtools.executables import Executables
-from pytest_cases import parametrize_with_cases
-from simulation import TestSimulation
-from test_gwf_maw04 import GwfMaw04Cases
-from test_gwf_maw_cases import GwfMawCases
-
-
-@parametrize_with_cases("case", cases=[GwfMawCases, GwfMaw04Cases])
-def test_gwf_models(case, targets: Executables):
- data, sim, cmp, exfunc = case
- sim.write_simulation()
- if cmp:
- cmp.write_simulation()
-
- test = TestSimulation(
- name=data.name,
- exe_dict=targets,
- exfunc=exfunc,
- idxsim=0, # TODO: remove parameter from TestSimulation
- mf6_regression=True,
- require_failure=data.xfail,
- make_comparison=data.compare,
- )
-
- test.set_model(sim.simulation_data.mfpath.get_sim_path(), testModel=False)
- test.run()
- test.compare()
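With the pytest-cases driver removed, every test module below is rewritten to the same TestFramework pattern: `build_model(idx, dir)` becomes `build_models(idx, test)`, the `eval_*` callbacks become `check_output(idx, test)`, and `sim.simpath` becomes `test.workspace`. A skeleton of that pattern, assembled from the constructor arguments used in the rewritten tests (function bodies elided, case name hypothetical):

```python
import pytest

from framework import TestFramework

cases = ["example_case"]  # hypothetical case name


def build_models(idx, test):
    # build and return (sim, comparison_sim_or_None) under test.workspace
    ...


def check_output(idx, test):
    # open output files under test.workspace and assert on the results
    ...


@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
    test = TestFramework(
        name=name,
        workspace=function_tmpdir,
        build=lambda t: build_models(idx, t),
        check=lambda t: check_output(idx, t),
        targets=targets,
    )
    test.run()
```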
diff --git a/autotest/test_gwf_ats01.py b/autotest/test_gwf_ats01.py
index 167ce57ba35..a2becae2240 100644
--- a/autotest/test_gwf_ats01.py
+++ b/autotest/test_gwf_ats01.py
@@ -1,6 +1,5 @@
"""
Test adaptive time step module
-
"""
import os
@@ -8,10 +7,10 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["gwf_ats01a"]
+cases = ["gwf_ats01a"]
nlay, nrow, ncol = 1, 1, 2
# set dt0, dtmin, dtmax, dtadj, dtfailadj
@@ -22,7 +21,7 @@
dtfailadj = 5.0
-def build_model(idx, dir):
+def build_models(idx, test):
perlen = [10]
nper = len(perlen)
nstp = [1]
@@ -41,10 +40,10 @@ def build_model(idx, dir):
for id in range(nper):
tdis_rc.append((perlen[id], nstp[id], tsmult[id]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -175,11 +174,9 @@ def build_model(idx, dir):
return sim, None
-def eval_flow(sim):
- print("evaluating flow...")
-
+def check_output(idx, test):
# This will fail if budget numbers cannot be read
- fpth = os.path.join(sim.simpath, f"{sim.name}.lst")
+ fpth = os.path.join(test.workspace, f"{test.name}.lst")
mflist = flopy.utils.Mf6ListBudget(fpth)
names = mflist.get_record_names()
inc = mflist.get_incremental()
@@ -189,7 +186,7 @@ def eval_flow(sim):
assert v == 10.0, f"Last time should be 10. Found {v}"
# ensure obs results changing monotonically
- fpth = os.path.join(sim.simpath, sim.name + ".obs.csv")
+ fpth = os.path.join(test.workspace, test.name + ".obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -205,17 +202,13 @@ def eval_flow(sim):
assert v == 10.0, f"Last time should be 10. Found {v}"
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- workspace = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, 0, workspace)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_flow, idxsim=0
- ),
- workspace,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
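`check_output` reads the listing budget and the obs CSV, asserts the final time is 10, and (per the comment above) verifies the observations change monotonically. A sketch of that monotonicity check with numpy, using hypothetical column names:

```python
import numpy as np

# hypothetical obs records as read by np.genfromtxt(..., names=True, delimiter=",")
tc = np.array(
    [(0.0, 1.0), (2.0, 1.4), (5.0, 1.8), (10.0, 2.0)],
    dtype=[("time", float), ("H1", float)],
)
assert tc["time"][-1] == 10.0, "Last time should be 10"
assert np.all(np.diff(tc["H1"]) >= 0.0), "obs values should change monotonically"
```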
diff --git a/autotest/test_gwf_ats02.py b/autotest/test_gwf_ats02.py
index 7b41db9cfdc..7e03230de24 100644
--- a/autotest/test_gwf_ats02.py
+++ b/autotest/test_gwf_ats02.py
@@ -1,7 +1,6 @@
"""
Test adaptive time step module with a one-d vertical column in which cells
dry and then rewet based on a ghb in the bottom cell.
-
"""
import os
@@ -9,10 +8,10 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["gwf_ats02a"]
+cases = ["gwf_ats02a"]
nlay, nrow, ncol = 5, 1, 1
botm = [80.0, 60.0, 40.0, 20.0, 0.0]
@@ -24,7 +23,7 @@
dtfailadj = 5.0
-def build_model(idx, dir):
+def build_models(idx, test):
perlen = [10, 10]
nper = len(perlen)
nstp = [5, 5]
@@ -42,10 +41,10 @@ def build_model(idx, dir):
for id in range(nper):
tdis_rc.append((perlen[id], nstp[id], tsmult[id]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -172,11 +171,11 @@ def build_model(idx, dir):
return sim, None
-def make_plot(sim):
+def make_plot(test):
print("making plots...")
- ws = sim.simpath
+ ws = test.workspace
- fname = sim.name + ".hds"
+ fname = test.name + ".hds"
fname = os.path.join(ws, fname)
hobj = flopy.utils.HeadFile(fname, precision="double")
head = hobj.get_alldata()[:, :, 0, 0]
@@ -206,11 +205,9 @@ def make_plot(sim):
plt.show()
-def eval_flow(sim):
- print("evaluating flow...")
-
+def check_output(idx, test):
# This will fail if budget numbers cannot be read
- fpth = os.path.join(sim.simpath, f"{sim.name}.lst")
+ fpth = os.path.join(test.workspace, f"{test.name}.lst")
mflist = flopy.utils.Mf6ListBudget(fpth)
names = mflist.get_record_names()
inc = mflist.get_incremental()
@@ -220,7 +217,7 @@ def eval_flow(sim):
assert v == 20.0, f"Last time should be 20. Found {v}"
# ensure obs results changing monotonically
- fpth = os.path.join(sim.simpath, sim.name + ".obs.csv")
+ fpth = os.path.join(test.workspace, test.name + ".obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -231,16 +228,13 @@ def eval_flow(sim):
), "layer 1 should be dry for this period"
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_flow, idxsim=0
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_ats03.py b/autotest/test_gwf_ats03.py
index b36908ef866..7685321011f 100644
--- a/autotest/test_gwf_ats03.py
+++ b/autotest/test_gwf_ats03.py
@@ -7,7 +7,6 @@
time zero and drops to 50.0 at time 100. So the constant head values, which
 are observed and written to an obs output file, must fall on a line between
(0, 100) and (100, 50), which is ensured by this test.
-
"""
import os
@@ -15,14 +14,14 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["gwf_ats03a"]
+cases = ["gwf_ats03a"]
nlay, nrow, ncol = 1, 1, 10
-def build_model(idx, dir):
+def build_models(idx, test):
perlen = [100.0]
nper = len(perlen)
nstp = [1]
@@ -41,10 +40,10 @@ def build_model(idx, dir):
for id in range(nper):
tdis_rc.append((perlen[id], nstp[id], tsmult[id]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -191,11 +190,9 @@ def build_model(idx, dir):
return sim, None
-def eval_flow(sim):
- print("evaluating flow...")
-
+def check_output(idx, test):
# ensure obs2 (a constant head time series) drops linearly from 100 to 50
- fpth = os.path.join(sim.simpath, sim.name + ".obs.csv")
+ fpth = os.path.join(test.workspace, test.name + ".obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -207,16 +204,13 @@ def eval_flow(sim):
assert np.allclose(answer, result), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_flow, idxsim=0
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
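As the module docstring explains, the constant-head observations must fall on the straight line between (0, 100) and (100, 50). A sketch of the comparison `check_output` performs, with hypothetical observation times and the expected linear answer:

```python
import numpy as np

times = np.array([0.0, 25.0, 50.0, 75.0, 100.0])        # hypothetical obs times
result = 100.0 - 0.5 * times                             # heads read from the obs CSV
answer = np.interp(times, [0.0, 100.0], [100.0, 50.0])   # line from (0, 100) to (100, 50)
assert np.allclose(answer, result), "obs should fall on the (0, 100)-(100, 50) line"
```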
diff --git a/autotest/test_gwf_ats_lak01.py b/autotest/test_gwf_ats_lak01.py
index 5f2381c08f3..90d9a0ad55d 100644
--- a/autotest/test_gwf_ats_lak01.py
+++ b/autotest/test_gwf_ats_lak01.py
@@ -1,16 +1,18 @@
-# Same as test_gwf_lak01 except it uses ATS. Test works by trying a
-# large time step that does not converge. ATS must then retry using
-# a smaller time step.
+"""
+Same as test_gwf_lak01 except it uses ATS. Test works by trying a
+large time step that does not converge. ATS must then retry using
+a smaller time step.
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["gwf_ats_lak_01a"]
+cases = ["gwf_ats_lak_01a"]
gwf = None
@@ -21,7 +23,7 @@ def get_idomain(nlay, nrow, ncol, lakend):
return idomain
-def build_model(idx, dir):
+def build_models(idx, test):
lx = 300.0
lz = 45.0
nlay = 45
@@ -48,10 +50,10 @@ def build_model(idx, dir):
nouter, ninner = 250, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -152,7 +154,7 @@ def build_model(idx, dir):
irch[i, j] = k + 1
nlakeconn = len(lake_vconnect)
- # pak_data = [lakeno, strt, nlakeconn]
+ # pak_data = [ifno, strt, nlakeconn]
initial_stage = 0.1
pak_data = [(0, initial_stage, nlakeconn)]
@@ -257,7 +259,7 @@ def make_plot_xsect(sim, headall, stageall):
# ax.set_ylim(-10, 5)
fname = "fig-xsect.pdf"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(sim.workspace, fname)
plt.savefig(fname, bbox_inches="tight")
@@ -274,7 +276,7 @@ def make_plot(sim, times, headall, stageall):
ax.plot(times, h, "bo-", label="max head")
fname = "fig-timeseries.pdf"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(sim.workspace, fname)
plt.savefig(fname, bbox_inches="tight")
@@ -288,19 +290,17 @@ def get_kij_from_node(node, nrow, ncol):
return k, i, j
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# calculate volume of water and make sure it is conserved
- fname = sim.name + ".lak.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = test.name + ".lak.bin"
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="STAGE")
times = bobj.get_times()
stage = bobj.get_alldata()
- fname = sim.name + ".cbc"
- fname = os.path.join(sim.simpath, fname)
+ fname = test.name + ".cbc"
+ fname = os.path.join(test.workspace, fname)
bobj = flopy.utils.CellBudgetFile(fname, precision="double", verbose=False)
times = bobj.get_times()
idomain = gwf.dis.idomain.array
@@ -308,7 +308,6 @@ def eval_results(sim):
all_passed = True
for itime, t in enumerate(times):
-
print(f"processing totim {t}")
stage_current = stage[itime].flatten()
print(f"lake stage = {stage_current}")
@@ -343,8 +342,8 @@ def eval_results(sim):
print(msg)
assert all_passed, "found recharge applied to cell beneath active lake"
- fname = sim.name + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = test.name + ".hds"
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_alldata()
@@ -410,16 +409,13 @@ def eval_results(sim):
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=0
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
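The helper `get_kij_from_node` (its body lies outside this hunk) maps a zero-based structured-grid node number back to a layer, row, column triple; the standard arithmetic is sketched below for reference.

```python
def get_kij_from_node(node, nrow, ncol):
    # zero-based node number -> (layer, row, column) on a structured grid
    k = node // (nrow * ncol)
    i = (node - k * nrow * ncol) // ncol
    j = node - k * nrow * ncol - i * ncol
    return k, i, j

assert get_kij_from_node(0, 3, 4) == (0, 0, 0)
assert get_kij_from_node(13, 3, 4) == (1, 0, 1)
```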
diff --git a/autotest/test_gwf_auxvars.py b/autotest/test_gwf_auxvars.py
index 412b53a8385..d80123906a4 100644
--- a/autotest/test_gwf_auxvars.py
+++ b/autotest/test_gwf_auxvars.py
@@ -1,18 +1,17 @@
import os
-import sys
import flopy
import numpy as np
import pytest
-from framework import TestFramework
-from simulation import TestSimulation
-ex = ["aux01"]
+from framework import DNODATA, TestFramework
+
+cases = ["aux01"]
auxvar1 = 101.0
auxvar2 = 102.0
-def build_model(idx, dir):
+def build_models(idx, test):
nlay, nrow, ncol = 1, 10, 10
nper = 3
perlen = [1.0, 1.0, 1.0]
@@ -30,10 +29,10 @@ def build_model(idx, dir):
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -176,15 +175,15 @@ def build_model(idx, dir):
)
# sfr.remove()
-    # <lakeno> <strt> <nlakeconn> [<aux(naux)>] [<boundname>]
+    # <ifno> <strt> <nlakeconn> [<aux(naux)>] [<boundname>]
packagedata = [
[0, 100.0, 1, auxvar1, auxvar2, "lake1"],
[1, 100.0, 1, auxvar1, auxvar2, "lake2"],
]
-    # <lakeno> <iconn> <cellid(ncelldim)> <claktype> <bedleak> <belev> <telev> <connlen> <connwidth>
+    # <ifno> <iconn> <cellid(ncelldim)> <claktype> <bedleak> <belev> <telev> <connlen> <connwidth>
connectiondata = [
- [0, 0, (0, 1, 1), "vertical", "none", 0.0, 0.0, 0.0, 0.0],
- [1, 0, (0, 2, 2), "vertical", "none", 0.0, 0.0, 0.0, 0.0],
+ [0, 0, (0, 1, 1), "vertical", DNODATA, 0.0, 0.0, 0.0, 0.0],
+ [1, 0, (0, 2, 2), "vertical", DNODATA, 0.0, 0.0, 0.0, 0.0],
]
lak = flopy.mf6.ModflowGwflak(
gwf,
@@ -202,14 +201,14 @@ def build_model(idx, dir):
)
# lak.remove()
-    # <iuzno> <cellid(ncelldim)> <landflag> <ivertcon> <surfdep> <vks> <thtr> <thts> <thti> <eps> [<boundname>]
+    # <ifno> <cellid(ncelldim)> <landflag> <ivertcon> <surfdep> <vks> <thtr> <thts> <thti> <eps> [<boundname>]
packagedata = [
[0, (0, nrow - 1, 5), 1, -1, 0.1, 0.01, 0.01, 0.1, 0.01, 3.5, "uz1"],
[1, (0, nrow - 1, 6), 1, -1, 0.1, 0.01, 0.01, 0.1, 0.01, 3.5, "uz1"],
[2, (0, nrow - 1, 7), 1, -1, 0.1, 0.01, 0.01, 0.1, 0.01, 3.5, "uz1"],
[3, (0, nrow - 1, 8), 1, -1, 0.1, 0.01, 0.01, 0.1, 0.01, 3.5, "uz1"],
]
-    # <iuzno> <finf> <pet> <extdp> <extwc> <ha> <hroot> <rootact> [<aux(naux)>]
+    # <ifno> <finf> <pet> <extdp> <extwc> <ha> <hroot> <rootact> [<aux(naux)>]
perioddata = []
for p in packagedata:
perioddata.append(
@@ -244,11 +243,9 @@ def build_model(idx, dir):
return sim, None
-def eval_model(sim):
- print("evaluating model...")
-
+def check_output(idx, test):
# maw budget aux variables
- fpth = os.path.join(sim.simpath, "aux01.maw.bud")
+ fpth = os.path.join(test.workspace, "aux01.maw.bud")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="auxiliary")
for r in records:
@@ -256,7 +253,7 @@ def eval_model(sim):
assert np.allclose(r["AUX2"], auxvar2)
# sfr budget aux variables
- fpth = os.path.join(sim.simpath, "aux01.sfr.bud")
+ fpth = os.path.join(test.workspace, "aux01.sfr.bud")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="auxiliary")
for r in records:
@@ -264,7 +261,7 @@ def eval_model(sim):
assert np.allclose(r["AUX2"], auxvar2)
# lak budget aux variables
- fpth = os.path.join(sim.simpath, "aux01.maw.bud")
+ fpth = os.path.join(test.workspace, "aux01.maw.bud")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="auxiliary")
for r in records:
@@ -272,7 +269,7 @@ def eval_model(sim):
assert np.allclose(r["AUX2"], auxvar2)
# uzf budget aux variables
- fpth = os.path.join(sim.simpath, "aux01.uzf.bud")
+ fpth = os.path.join(test.workspace, "aux01.uzf.bud")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="auxiliary")
for r in records:
@@ -280,7 +277,7 @@ def eval_model(sim):
assert np.allclose(r["AUX2"], auxvar2)
# gwf budget maw aux variables
- fpth = os.path.join(sim.simpath, "aux01.cbc")
+ fpth = os.path.join(test.workspace, "aux01.cbc")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="maw")
for r in records:
@@ -300,16 +297,13 @@ def eval_model(sim):
assert np.allclose(r["AUX2"], auxvar2)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_model, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
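The lake connection records above now pass `DNODATA` (imported from the test framework at the top of this file) for `bedleak` instead of the string "none". A minimal sketch of the changed record; the numeric value of the constant is an assumption here (MODFLOW 6 conventionally uses 3.0e30 as its no-data double):

```python
from framework import DNODATA  # framework constant; assumed to be MODFLOW 6's no-data value (~3.0e30)

# lak connectiondata record with bedleak set to DNODATA rather than the string "none"
record = [0, 0, (0, 1, 1), "vertical", DNODATA, 0.0, 0.0, 0.0, 0.0]
```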
diff --git a/autotest/test_gwf_auxvars02.py b/autotest/test_gwf_auxvars02.py
index c9ef34ee4d5..b7c82acf1a0 100644
--- a/autotest/test_gwf_auxvars02.py
+++ b/autotest/test_gwf_auxvars02.py
@@ -1,16 +1,15 @@
import os
-import sys
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["aux02"]
+cases = ["aux02"]
-def build_model(idx, dir):
+def build_models(idx, test):
nlay, nrow, ncol = 1, 10, 10
nper = 3
perlen = [1.0, 1.0, 1.0]
@@ -28,10 +27,10 @@ def build_model(idx, dir):
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -107,11 +106,9 @@ def build_model(idx, dir):
return sim, None
-def eval_model(sim):
- print("evaluating model...")
-
+def check_output(idx, test):
# maw budget aux variables
- fpth = os.path.join(sim.simpath, "aux02.bud")
+ fpth = os.path.join(test.workspace, "aux02.bud")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="CHD")
for r in records:
@@ -120,16 +117,13 @@ def eval_model(sim):
assert np.allclose(r[aname], a)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_model, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_boundname01.py b/autotest/test_gwf_boundname01.py
index 8a5ac6050cb..e10fdc54fa1 100644
--- a/autotest/test_gwf_boundname01.py
+++ b/autotest/test_gwf_boundname01.py
@@ -3,26 +3,21 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = [
+cases = [
"bndname01",
]
-def build_model(idx, exdir):
-
- sim = get_model(idx, exdir)
-
- ws = os.path.join(exdir, "mf6")
- mc = get_model(idx, ws)
-
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
+ mc = get_model(idx, os.path.join(test.workspace, "mf6"))
return sim, mc
def get_model(idx, ws):
-
nlay, nrow, ncol = 1, 1, 100
nper = 1
perlen = [5.0]
@@ -51,7 +46,7 @@ def get_model(idx, ws):
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
sim = flopy.mf6.MFSimulation(
@@ -144,7 +139,7 @@ def get_model(idx, ws):
def replace_quotes(idx, exdir):
ws = os.path.join(exdir, "mf6")
- gwfname = f"gwf_{ex[idx]}"
+ gwfname = f"gwf_{cases[idx]}"
extensions = (".chd", ".chd.obs")
for ext in extensions:
fpth = os.path.join(ws, f"{gwfname}{ext}")
@@ -155,14 +150,12 @@ def replace_quotes(idx, exdir):
f.write(line.replace("'", '"').replace('face"s', "face's"))
-def eval_obs(sim):
- print("evaluating observations results..." f"({sim.name})")
-
- fpth = os.path.join(sim.simpath, f"gwf_{sim.name}.chd.obs.csv")
+def check_output(idx, test):
+ fpth = os.path.join(test.workspace, f"gwf_{test.name}.chd.obs.csv")
obs0 = np.genfromtxt(fpth, delimiter=",", names=True)
names0 = obs0.dtype.names
- fpth = os.path.join(sim.simpath, "mf6", f"gwf_{sim.name}.chd.obs.csv")
+ fpth = os.path.join(test.workspace, "mf6", f"gwf_{test.name}.chd.obs.csv")
obs1 = np.genfromtxt(fpth, delimiter=",", names=True)
names1 = obs1.dtype.names
@@ -170,14 +163,13 @@ def eval_obs(sim):
assert np.array_equal(obs0, obs1), "observations are not identical"
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(name=name, exe_dict=targets, idxsim=0),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_buy_lak01.py b/autotest/test_gwf_buy_lak01.py
index 7a12bb2cffa..8641c87d8b7 100644
--- a/autotest/test_gwf_buy_lak01.py
+++ b/autotest/test_gwf_buy_lak01.py
@@ -1,28 +1,30 @@
-# Test the buoyancy package and the variable density flows between the lake
-# and the gwf model. This model has 4 layers and a lake incised within it.
-# The model is transient and has heads in the aquifer higher than the initial
-# stage in the lake. As the model runs, the lake and aquifer equalize and
-# should end up at the same level. The test ensures that the initial and
-# final water volumes in the entire system are the same. There are three
-# different cases:
-# 1. No buoyancy package
-# 2. Buoyancy package with lake and aquifer density = 1000.
-# 3. Buoyancy package with lake and aquifer density = 1024.5
+"""
+Test the buoyancy package and the variable density flows between the lake
+and the gwf model. This model has 4 layers and a lake incised within it.
+The model is transient and has heads in the aquifer higher than the initial
+stage in the lake. As the model runs, the lake and aquifer equalize and
+should end up at the same level. The test ensures that the initial and
+final water volumes in the entire system are the same. There are three
+different cases:
+ 1. No buoyancy package
+ 2. Buoyancy package with lake and aquifer density = 1000.
+ 3. Buoyancy package with lake and aquifer density = 1024.5
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["buy_lak_01a"] # , 'buy_lak_01b', 'buy_lak_01c']
+cases = ["buy_lak_01a"] # , 'buy_lak_01b', 'buy_lak_01c']
buy_on_list = [False] # , True, True]
concbuylist = [0.0] # , 0., 35.]
-def build_model(idx, dir):
+def build_models(idx, test):
lx = 7.0
lz = 4.0
nlay = 4
@@ -49,10 +51,10 @@ def build_model(idx, dir):
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -130,14 +132,14 @@ def build_model(idx, dir):
)
nlakeconn = 11 # note: number of connections for this lake
- # pak_data = [lakeno, strt, nlakeconn, dense, boundname]
+ # pak_data = [ifno, strt, nlakeconn, dense, boundname]
pak_data = [(0, 2.25, nlakeconn, lake_dense)]
connlen = delr / 2.0
connwidth = delc
bedleak = "None"
con_data = [
- # con_data=(lakeno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth )
+ # con_data=(ifno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth )
(0, 0, (0, 0, 0), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 1, (1, 0, 1), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 2, (1, 0, 1), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
@@ -208,20 +210,18 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# calculate volume of water and make sure it is conserved
- gwfname = "gwf_" + sim.name
+ gwfname = "gwf_" + test.name
fname = gwfname + ".lak.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="STAGE")
stage = bobj.get_alldata().flatten()
# print(stage)
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_data()
@@ -252,16 +252,13 @@ def eval_results(sim):
# assert False
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=0
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_buy_lak02.py b/autotest/test_gwf_buy_lak02.py
index 514f3348da5..204cc382363 100644
--- a/autotest/test_gwf_buy_lak02.py
+++ b/autotest/test_gwf_buy_lak02.py
@@ -1,90 +1,285 @@
+"""
+Test the buoyancy package and the variable density flows between the lake
+and the gwf model. This model has 4 layers and a lake incised within it.
+The model is transient and has heads in the aquifer higher than the initial
+stage in the lake. As the model runs, the lake and aquifer equalize and
+should end up at the same level. The test ensures that the initial and
+final water volumes in the entire system are the same. This test is different
+from the previous test in that transport is active. There are four
+different cases:
+ 1. lak and aquifer have concentration of 0.
+ 2. lak and aquifer have concentration of 35.
+ 3. lak has concentration of 0., aquifer is 35.
+ 4. lak has concentration of 35., aquifer is 0.
+"""
+
import os
-from typing import NamedTuple
import flopy
import numpy as np
+import pytest
+
from framework import TestFramework
-from pytest_cases import parametrize, parametrize_with_cases
-from simulation import TestSimulation
-
-
-class GwfBuyLakCases:
- """
- Test the buoyancy package and the variable density flows between the lake
- and the gwf model. This model has 4 layers and a lake incised within it.
- The model is transient and has heads in the aquifer higher than the initial
- stage in the lake. As the model runs, the lake and aquifer equalize and
- should end up at the same level. The test ensures that the initial and
- final water volumes in the entire system are the same. This test is different
- from the previous test in that transport is active. There are four
- different cases:
- 1. lak and aquifer have concentration of 0.
- 2. lak and aquifer have concentration of 35.
- 3. lak has concentration of 0., aquifer is 35.
- 4. lak has concentration of 35., aquifer is 0.
- """
-
- class Data(NamedTuple):
- name: str
- gwt_conc: float
- lak_conc: float
-
- @parametrize(
- data=[
- Data(name="a", gwt_conc=0, lak_conc=0),
- Data(name="b", gwt_conc=35, lak_conc=35),
- Data(name="c", gwt_conc=35, lak_conc=0),
- Data(name="d", gwt_conc=0, lak_conc=35),
- ]
+
+simname = "gwfbuylak02"
+cases = [
+ f"{simname}a",
+ f"{simname}b",
+ f"{simname}c",
+ f"{simname}d",
+]
+gwt_conc = [0, 35, 35, 0]
+lak_conc = [0, 35, 0, 35]
+
+
+def build_models(idx, test):
+ name = cases[idx]
+
+ lx = 7.0
+ lz = 4.0
+ nlay = 4
+ nrow = 1
+ ncol = 7
+ nper = 1
+ delc = 1.0
+ delr = lx / ncol
+ delz = lz / nlay
+ top = 4.0
+ botm = [3.0, 2.0, 1.0, 0.0]
+
+ perlen = [50.0]
+ nstp = [50]
+ tsmult = [1.0]
+
+ Kh = 1.0
+ Kv = 1.0
+
+ tdis_rc = []
+ for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
+
+ nouter, ninner = 700, 300
+ hclose, rclose, relax = 1e-8, 1e-6, 0.97
+
+ # build MODFLOW 6 files
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name,
+ version="mf6",
+ exe_name="mf6",
+ sim_ws=test.workspace,
+ )
+ # create tdis package
+ tdis = flopy.mf6.ModflowTdis(
+ sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
- def case_generator(self, data, function_tmpdir):
- lx = 7.0
- lz = 4.0
- nlay = 4
- nrow = 1
- ncol = 7
- nper = 1
- delc = 1.0
- delr = lx / ncol
- delz = lz / nlay
- top = 4.0
- botm = [3.0, 2.0, 1.0, 0.0]
-
- perlen = [50.0]
- nstp = [50]
- tsmult = [1.0]
-
- Kh = 1.0
- Kv = 1.0
-
- tdis_rc = []
- for i in range(nper):
- tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
-
- nouter, ninner = 700, 300
- hclose, rclose, relax = 1e-8, 1e-6, 0.97
-
- # build MODFLOW 6 files
- sim = flopy.mf6.MFSimulation(
- sim_name=data.name,
- version="mf6",
- exe_name="mf6",
- sim_ws=str(function_tmpdir),
- )
- # create tdis package
- tdis = flopy.mf6.ModflowTdis(
- sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
- )
- # create gwf model
- gwfname = "gwf_" + data.name
- gwtname = "gwt_" + data.name
+ # create gwf model
+ gwfname = "gwf_" + name
+ gwtname = "gwt_" + name
+
+ gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, newtonoptions="NEWTON")
+
+ imsgwf = flopy.mf6.ModflowIms(
+ sim,
+ print_option="ALL",
+ outer_dvclose=hclose,
+ outer_maximum=nouter,
+ under_relaxation="NONE",
+ inner_maximum=ninner,
+ inner_dvclose=hclose,
+ rcloserecord=f"{rclose} strict",
+ linear_acceleration="BICGSTAB",
+ scaling_method="NONE",
+ reordering_method="NONE",
+ relaxation_factor=relax,
+ filename=f"{gwfname}.ims",
+ )
- gwf = flopy.mf6.ModflowGwf(
- sim, modelname=gwfname, newtonoptions="NEWTON"
- )
+ idomain = np.full((nlay, nrow, ncol), 1)
+ idomain[0, 0, 1:6] = 0
+ idomain[1, 0, 2:5] = 0
+ idomain[2, 0, 3:4] = 0
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ top=top,
+ botm=botm,
+ idomain=idomain,
+ )
+
+ # initial conditions
+ strt = np.zeros((nlay, nrow, ncol), dtype=float)
+ strt[0, 0, :] = 3.5
+ strt[1, 0, :] = 3.0
+ strt[1, 0, 1:6] = 2.5
+ strt[2, 0, :] = 2.0
+ strt[3, 0, :] = 1.0
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ xt3doptions=False,
+ save_flows=True,
+ save_specific_discharge=True,
+ icelltype=1,
+ k=Kh,
+ k33=Kv,
+ )
+
+ sto = flopy.mf6.ModflowGwfsto(gwf, sy=0.3, ss=0.0, iconvert=1)
+
+ buy_on = True
+ if buy_on:
+ pd = [(0, 0.7, 0.0, gwtname, "CONCENTRATION")]
+ buy = flopy.mf6.ModflowGwfbuy(gwf, denseref=1000.0, packagedata=pd)
+
+ nlakeconn = 11 # note: number of connections for this lake
+ # pak_data = [ifno, strt, nlakeconn, testauxvar, concentration, boundname]
+ pak_data = [(0, 2.25, nlakeconn, 0.0, 0.0)]
+
+ connlen = delr / 2.0
+ connwidth = delc
+ bedleak = "None"
+ con_data = [
+ # con_data=(ifno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth )
+ (
+ 0,
+ 0,
+ (0, 0, 0),
+ "HORIZONTAL",
+ bedleak,
+ 10,
+ 10,
+ connlen,
+ connwidth,
+ ),
+ (0, 1, (1, 0, 1), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
+ (
+ 0,
+ 2,
+ (1, 0, 1),
+ "HORIZONTAL",
+ bedleak,
+ 10,
+ 10,
+ connlen,
+ connwidth,
+ ),
+ (0, 3, (2, 0, 2), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
+ (
+ 0,
+ 4,
+ (2, 0, 2),
+ "HORIZONTAL",
+ bedleak,
+ 10,
+ 10,
+ connlen,
+ connwidth,
+ ),
+ (0, 5, (3, 0, 3), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
+ (
+ 0,
+ 6,
+ (2, 0, 4),
+ "HORIZONTAL",
+ bedleak,
+ 10,
+ 10,
+ connlen,
+ connwidth,
+ ),
+ (0, 7, (2, 0, 4), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
+ (
+ 0,
+ 8,
+ (1, 0, 5),
+ "HORIZONTAL",
+ bedleak,
+ 10,
+ 10,
+ connlen,
+ connwidth,
+ ),
+ (0, 9, (1, 0, 5), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
+ (
+ 0,
+ 10,
+ (0, 0, 6),
+ "HORIZONTAL",
+ bedleak,
+ 10,
+ 10,
+ connlen,
+ connwidth,
+ ),
+ ]
+
+ # period data
+ p_data = [
+ (0, "STATUS", "ACTIVE"),
+ ]
+
+ # note: for specifying lake number, use fortran indexing!
+ fname = f"{gwfname}.lak.obs.csv"
+ lak_obs = {
+ fname: [
+ ("lakestage", "stage", 1),
+ ("lakevolume", "volume", 1),
+ ("lak1", "lak", 1, 1),
+ ("lak2", "lak", 1, 2),
+ ("lak3", "lak", 1, 3),
+ ("lak4", "lak", 1, 4),
+ ("lak5", "lak", 1, 5),
+ ("lak6", "lak", 1, 6),
+ ("lak7", "lak", 1, 7),
+ ("lak8", "lak", 1, 8),
+ ("lak9", "lak", 1, 9),
+ ("lak10", "lak", 1, 10),
+ ("lak11", "lak", 1, 11),
+ ],
+ "digits": 10,
+ }
+
+ lak = flopy.mf6.modflow.ModflowGwflak(
+ gwf,
+ save_flows=True,
+ print_input=True,
+ print_flows=True,
+ print_stage=True,
+ stage_filerecord=f"{gwfname}.lak.bin",
+ budget_filerecord=f"{gwfname}.lak.bud",
+ nlakes=len(pak_data),
+ ntables=0,
+ packagedata=pak_data,
+ pname="LAK-1",
+ connectiondata=con_data,
+ perioddata=p_data,
+ observations=lak_obs,
+ auxiliary=["TESTAUXVAR", "CONCENTRATION"],
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{gwfname}.cbc",
+ head_filerecord=f"{gwfname}.hds",
+ headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
+ )
+
+ # create gwt model
+ transport = True
+ if transport:
+ gwt = flopy.mf6.ModflowGwt(sim, modelname=gwtname)
- imsgwf = flopy.mf6.ModflowIms(
+ imsgwt = flopy.mf6.ModflowIms(
sim,
print_option="ALL",
outer_dvclose=hclose,
@@ -97,15 +292,12 @@ def case_generator(self, data, function_tmpdir):
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
- filename=f"{gwfname}.ims",
+ filename=f"{gwtname}.ims",
)
+ sim.register_ims_package(imsgwt, [gwt.name])
- idomain = np.full((nlay, nrow, ncol), 1)
- idomain[0, 0, 1:6] = 0
- idomain[1, 0, 2:5] = 0
- idomain[2, 0, 3:4] = 0
- dis = flopy.mf6.ModflowGwfdis(
- gwf,
+ dis = flopy.mf6.ModflowGwtdis(
+ gwt,
nlay=nlay,
nrow=nrow,
ncol=ncol,
@@ -117,363 +309,150 @@ def case_generator(self, data, function_tmpdir):
)
# initial conditions
- strt = np.zeros((nlay, nrow, ncol), dtype=float)
- strt[0, 0, :] = 3.5
- strt[1, 0, :] = 3.0
- strt[1, 0, 1:6] = 2.5
- strt[2, 0, :] = 2.0
- strt[3, 0, :] = 1.0
- ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
-
- # node property flow
- npf = flopy.mf6.ModflowGwfnpf(
- gwf,
- xt3doptions=False,
- save_flows=True,
- save_specific_discharge=True,
- icelltype=1,
- k=Kh,
- k33=Kv,
- )
+ ic = flopy.mf6.ModflowGwtic(gwt, strt=gwt_conc[idx])
- sto = flopy.mf6.ModflowGwfsto(gwf, sy=0.3, ss=0.0, iconvert=1)
-
- buy_on = True
- if buy_on:
- pd = [(0, 0.7, 0.0, gwtname, "CONCENTRATION")]
- buy = flopy.mf6.ModflowGwfbuy(gwf, denseref=1000.0, packagedata=pd)
-
- nlakeconn = 11 # note: number of connections for this lake
- # pak_data = [lakeno, strt, nlakeconn, testauxvar, concentration, boundname]
- pak_data = [(0, 2.25, nlakeconn, 0.0, 0.0)]
-
- connlen = delr / 2.0
- connwidth = delc
- bedleak = "None"
- con_data = [
- # con_data=(lakeno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth )
- (
- 0,
- 0,
- (0, 0, 0),
- "HORIZONTAL",
- bedleak,
- 10,
- 10,
- connlen,
- connwidth,
- ),
- (0, 1, (1, 0, 1), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
- (
- 0,
- 2,
- (1, 0, 1),
- "HORIZONTAL",
- bedleak,
- 10,
- 10,
- connlen,
- connwidth,
- ),
- (0, 3, (2, 0, 2), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
- (
- 0,
- 4,
- (2, 0, 2),
- "HORIZONTAL",
- bedleak,
- 10,
- 10,
- connlen,
- connwidth,
- ),
- (0, 5, (3, 0, 3), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
- (
- 0,
- 6,
- (2, 0, 4),
- "HORIZONTAL",
- bedleak,
- 10,
- 10,
- connlen,
- connwidth,
- ),
- (0, 7, (2, 0, 4), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
- (
- 0,
- 8,
- (1, 0, 5),
- "HORIZONTAL",
- bedleak,
- 10,
- 10,
- connlen,
- connwidth,
- ),
- (0, 9, (1, 0, 5), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
- (
- 0,
- 10,
- (0, 0, 6),
- "HORIZONTAL",
- bedleak,
- 10,
- 10,
- connlen,
- connwidth,
- ),
- ]
+ # advection
+ adv = flopy.mf6.ModflowGwtadv(gwt, scheme="UPSTREAM")
- # period data
- p_data = [
- (0, "STATUS", "ACTIVE"),
- ]
+ # storage
+ porosity = 0.30
+ sto = flopy.mf6.ModflowGwtmst(gwt, porosity=porosity)
- # note: for specifying lake number, use fortran indexing!
- fname = f"{gwfname}.lak.obs.csv"
- lak_obs = {
- fname: [
- ("lakestage", "stage", 1),
- ("lakevolume", "volume", 1),
- ("lak1", "lak", 1, 1),
- ("lak2", "lak", 1, 2),
- ("lak3", "lak", 1, 3),
- ("lak4", "lak", 1, 4),
- ("lak5", "lak", 1, 5),
- ("lak6", "lak", 1, 6),
- ("lak7", "lak", 1, 7),
- ("lak8", "lak", 1, 8),
- ("lak9", "lak", 1, 9),
- ("lak10", "lak", 1, 10),
- ("lak11", "lak", 1, 11),
- ],
- "digits": 10,
- }
+ # sources
+ sourcerecarray = [
+ (),
+ ]
+ ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray)
- lak = flopy.mf6.modflow.ModflowGwflak(
- gwf,
+ lktpackagedata = [
+ (0, lak_conc[idx], 99.0, 999.0, "mylake"),
+ ]
+ lkt = flopy.mf6.modflow.ModflowGwtlkt(
+ gwt,
+ boundnames=True,
save_flows=True,
print_input=True,
print_flows=True,
- print_stage=True,
- stage_filerecord=f"{gwfname}.lak.bin",
- budget_filerecord=f"{gwfname}.lak.bud",
- nlakes=len(pak_data),
- ntables=0,
- packagedata=pak_data,
- pname="LAK-1",
- connectiondata=con_data,
- perioddata=p_data,
- observations=lak_obs,
- auxiliary=["TESTAUXVAR", "CONCENTRATION"],
+ print_concentration=True,
+ concentration_filerecord=gwtname + ".lkt.bin",
+ budget_filerecord="gwtlak1.bud",
+ packagedata=lktpackagedata,
+ pname="LKT-1",
+ flow_package_name="LAK-1",
+ flow_package_auxiliary_name="CONCENTRATION",
+ auxiliary=["aux1", "aux2"],
)
-
# output control
- oc = flopy.mf6.ModflowGwfoc(
- gwf,
- budget_filerecord=f"{gwfname}.cbc",
- head_filerecord=f"{gwfname}.hds",
- headprintrecord=[
+ oc = flopy.mf6.ModflowGwtoc(
+ gwt,
+ budget_filerecord=f"{gwtname}.cbc",
+ concentration_filerecord=f"{gwtname}.ucn",
+ concentrationprintrecord=[
("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
],
- saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
- printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
+ saverecord=[("CONCENTRATION", "ALL")],
+ printrecord=[("CONCENTRATION", "ALL"), ("BUDGET", "ALL")],
)
- # create gwt model
- transport = True
- if transport:
- gwt = flopy.mf6.ModflowGwt(sim, modelname=gwtname)
-
- imsgwt = flopy.mf6.ModflowIms(
- sim,
- print_option="ALL",
- outer_dvclose=hclose,
- outer_maximum=nouter,
- under_relaxation="NONE",
- inner_maximum=ninner,
- inner_dvclose=hclose,
- rcloserecord=f"{rclose} strict",
- linear_acceleration="BICGSTAB",
- scaling_method="NONE",
- reordering_method="NONE",
- relaxation_factor=relax,
- filename=f"{gwtname}.ims",
- )
- sim.register_ims_package(imsgwt, [gwt.name])
-
- dis = flopy.mf6.ModflowGwtdis(
- gwt,
- nlay=nlay,
- nrow=nrow,
- ncol=ncol,
- delr=delr,
- delc=delc,
- top=top,
- botm=botm,
- idomain=idomain,
- )
-
- # initial conditions
- strt = data.gwt_conc
- ic = flopy.mf6.ModflowGwtic(gwt, strt=strt)
-
- # advection
- adv = flopy.mf6.ModflowGwtadv(gwt, scheme="UPSTREAM")
-
- # storage
- porosity = 0.30
- sto = flopy.mf6.ModflowGwtmst(gwt, porosity=porosity)
-
- # sources
- sourcerecarray = [
- (),
- ]
- ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray)
-
- lak_conc = data.lak_conc
- lktpackagedata = [
- (0, lak_conc, 99.0, 999.0, "mylake"),
- ]
- lkt = flopy.mf6.modflow.ModflowGwtlkt(
- gwt,
- boundnames=True,
- save_flows=True,
- print_input=True,
- print_flows=True,
- print_concentration=True,
- concentration_filerecord=gwtname + ".lkt.bin",
- budget_filerecord="gwtlak1.bud",
- packagedata=lktpackagedata,
- pname="LKT-1",
- flow_package_name="LAK-1",
- flow_package_auxiliary_name="CONCENTRATION",
- auxiliary=["aux1", "aux2"],
- )
- # output control
- oc = flopy.mf6.ModflowGwtoc(
- gwt,
- budget_filerecord=f"{gwtname}.cbc",
- concentration_filerecord=f"{gwtname}.ucn",
- concentrationprintrecord=[
- ("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
- ],
- saverecord=[("CONCENTRATION", "ALL")],
- printrecord=[("CONCENTRATION", "ALL"), ("BUDGET", "ALL")],
- )
-
- fmi = flopy.mf6.ModflowGwtfmi(gwt, flow_imbalance_correction=True)
-
- # GWF GWT exchange
- gwfgwt = flopy.mf6.ModflowGwfgwt(
- sim,
- exgtype="GWF6-GWT6",
- exgmnamea=gwfname,
- exgmnameb=gwtname,
- filename=f"{data.name}.gwfgwt",
- )
-
- return data, sim, None, self.eval_results
-
- def eval_results(self, sim, data):
- print("evaluating results...")
-
- # calculate volume of water and make sure it is conserved
- gwfname = "gwf_" + data.name
- gwtname = "gwt_" + data.name
- fname = gwfname + ".lak.bin"
- fname = os.path.join(sim.simpath, fname)
- assert os.path.isfile(fname)
- bobj = flopy.utils.HeadFile(fname, text="STAGE")
- stage = bobj.get_alldata().flatten()
- # print(stage)
-
- fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
- assert os.path.isfile(fname)
- hobj = flopy.utils.HeadFile(fname)
- head = hobj.get_data()
- # print(head)
-
- fname = gwtname + ".ucn"
- fname = os.path.join(sim.simpath, fname)
- assert os.path.isfile(fname)
- cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
- conc = cobj.get_data()
-
- fname = gwtname + ".lkt.bin"
- fname = os.path.join(sim.simpath, fname)
- assert os.path.isfile(fname)
- cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
- clak = cobj.get_data().flatten()
-
- # calculate initial water volume
- v0 = 3.5 * 2 # outermost columns
- v0 += 2.5 * 2 # next innermost columns
- v0 += 2.0 * 2 # next innermost columns
- v0 += 1.0 * 1 # middle column
- v0 = v0 * 0.3 # specific yield
-
- m0 = v0 * data.gwt_conc
- vl0 = (2.25 - 2.0) * 2 + (2.25 - 1.0)
- m0 += vl0 * data.lak_conc
- v0 += vl0
- print(f"initial volume of water in model = {v0}")
- print(f"initial mass of solute in model = {m0}")
-
- # calculate ending water volume in model
- head = np.where(head > 1e10, -1e10, head)
- botm = [3, 2, 1, 0]
- top = [4, 3, 2, 1]
- nlay, nrow, ncol = head.shape
- v = 0
- m = 0.0
- for k in range(nlay):
- for i in range(nrow):
- for j in range(ncol):
- h = min(head[k, i, j], top[k])
- dz = h - botm[k]
- vcell = max(dz, 0.0) * 0.3
- v += vcell
- m += vcell * conc[k, i, j]
-
- s = stage[-1]
- vl = (s - 2.0) * 2 + (s - 1.0)
- v = v + vl
- m += vl * clak[0]
- print(f"final volume of water in model = {v}")
- print(f"final mass of solute in model = {m}")
-
- # check to make sure starting water volume same as equalized final volume
- errmsg = f"initial and final water volume not equal: {v0} {v}"
- assert np.allclose(v0, v), errmsg
-
-        # check to make sure the starting solute mass equals the equalized final solute mass
- errmsg = f"initial and final solute mass not equal: {m0} {m}"
- assert np.allclose(m0, m), errmsg
-
- # todo: add a better check of the lake concentrations
-
-
-@parametrize_with_cases(
- "case",
- cases=[
- GwfBuyLakCases,
- ],
-)
-def test_mf6model(case, targets):
- data, sim, cmp, evl = case
- sim.write_simulation()
- if cmp:
- cmp.write_simulation()
-
- simulation = TestSimulation(
- name=data.name, exe_dict=targets, exfunc=evl, idxsim=0
- )
- simulation.set_model(
- sim.simulation_data.mfpath.get_sim_path(), testModel=False
+ fmi = flopy.mf6.ModflowGwtfmi(gwt, flow_imbalance_correction=True)
+
+ # GWF GWT exchange
+ gwfgwt = flopy.mf6.ModflowGwfgwt(
+ sim,
+ exgtype="GWF6-GWT6",
+ exgmnamea=gwfname,
+ exgmnameb=gwtname,
+ filename=f"{name}.gwfgwt",
+ )
+
+ return sim
+
+
+def check_output(idx, test):
+ # calculate volume of water and make sure it is conserved
+ gwfname = "gwf_" + test.name
+ gwtname = "gwt_" + test.name
+ fname = gwfname + ".lak.bin"
+ fname = os.path.join(test.workspace, fname)
+ assert os.path.isfile(fname)
+ bobj = flopy.utils.HeadFile(fname, text="STAGE")
+ stage = bobj.get_alldata().flatten()
+ # print(stage)
+
+ fname = gwfname + ".hds"
+ fname = os.path.join(test.workspace, fname)
+ assert os.path.isfile(fname)
+ hobj = flopy.utils.HeadFile(fname)
+ head = hobj.get_data()
+ # print(head)
+
+ fname = gwtname + ".ucn"
+ fname = os.path.join(test.workspace, fname)
+ assert os.path.isfile(fname)
+ cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
+ conc = cobj.get_data()
+
+ fname = gwtname + ".lkt.bin"
+ fname = os.path.join(test.workspace, fname)
+ assert os.path.isfile(fname)
+ cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
+ clak = cobj.get_data().flatten()
+
+ # calculate initial water volume
+ v0 = 3.5 * 2 # outermost columns
+ v0 += 2.5 * 2 # next innermost columns
+ v0 += 2.0 * 2 # next innermost columns
+ v0 += 1.0 * 1 # middle column
+ v0 = v0 * 0.3 # specific yield
+
+ m0 = v0 * gwt_conc[idx]
+ vl0 = (2.25 - 2.0) * 2 + (2.25 - 1.0)
+ m0 += vl0 * lak_conc[idx]
+ v0 += vl0
+ print(f"initial volume of water in model = {v0}")
+ print(f"initial mass of solute in model = {m0}")
+
+ # calculate ending water volume in model
+ head = np.where(head > 1e10, -1e10, head)
+ botm = [3, 2, 1, 0]
+ top = [4, 3, 2, 1]
+ nlay, nrow, ncol = head.shape
+ v = 0
+ m = 0.0
+ for k in range(nlay):
+ for i in range(nrow):
+ for j in range(ncol):
+ h = min(head[k, i, j], top[k])
+ dz = h - botm[k]
+ vcell = max(dz, 0.0) * 0.3
+ v += vcell
+ m += vcell * conc[k, i, j]
+
+ s = stage[-1]
+ vl = (s - 2.0) * 2 + (s - 1.0)
+ v = v + vl
+ m += vl * clak[0]
+ print(f"final volume of water in model = {v}")
+ print(f"final mass of solute in model = {m}")
+
+ # check to make sure starting water volume same as equalized final volume
+ errmsg = f"initial and final water volume not equal: {v0} {v}"
+ assert np.allclose(v0, v), errmsg
+
+    # check to make sure the starting solute mass equals the equalized final solute mass
+ errmsg = f"initial and final solute mass not equal: {m0} {m}"
+ assert np.allclose(m0, m), errmsg
+
+ # todo: add a better check of the lake concentrations
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, targets, function_tmpdir):
+ framework = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
- simulation.run()
- simulation.compare()
- evl(simulation, data)
+ framework.run()
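`check_output` requires the final water volume and solute mass to match the initial state built above. Working through the initial-volume arithmetic coded in `check_output` gives the target the equalized system must reproduce (values copied from the code above):

```python
# aquifer volume: initial saturated thickness per column times specific yield (sy = 0.3)
v_aquifer = (3.5 * 2 + 2.5 * 2 + 2.0 * 2 + 1.0 * 1) * 0.3   # = 5.1
# lake volume at the initial stage of 2.25
v_lake = (2.25 - 2.0) * 2 + (2.25 - 1.0)                     # = 1.75
v0 = v_aquifer + v_lake                                       # = 6.85
assert abs(v0 - 6.85) < 1e-9
```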
diff --git a/autotest/test_gwf_buy_maw01.py b/autotest/test_gwf_buy_maw01.py
index 09e96b37579..8def29a12ae 100644
--- a/autotest/test_gwf_buy_maw01.py
+++ b/autotest/test_gwf_buy_maw01.py
@@ -1,29 +1,30 @@
-# Test the buoyancy package and the variable density flows between maw
-# and the gwf model. This model has 4 layers with a single maw.
-# The model is transient and has heads in the aquifer higher than the initial
-# stage in the well. As the model runs, the well and aquifer equalize and
-# should end up at the same level. The test ensures that the initial and
-# final water volumes in the entire system are the same. There are three
-# different cases:
-# 1. No buoyancy package
-# 2. Buoyancy package with maw and aquifer density = 1000.
-# 3. Buoyancy package with maw and aquifer density = 1024.5
+"""
+Test the buoyancy package and the variable density flows between maw
+and the gwf model. This model has 4 layers with a single maw.
+The model is transient and has heads in the aquifer higher than the initial
+stage in the well. As the model runs, the well and aquifer equalize and
+should end up at the same level. The test ensures that the initial and
+final water volumes in the entire system are the same. There are three
+different cases:
+ 1. No buoyancy package
+ 2. Buoyancy package with maw and aquifer density = 1000.
+ 3. Buoyancy package with maw and aquifer density = 1024.5
+"""
import os
-import sys
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["buy_maw_01a"] # , 'buy_maw_01b', 'buy_maw_01c']
+cases = ["buy_maw_01a"] # , 'buy_maw_01b', 'buy_maw_01c']
buy_on_list = [False] # , True, True]
concbuylist = [0.0] # , 0., 35.]
-def build_model(idx, dir):
+def build_models(idx, test):
lx = 7.0
lz = 4.0
nlay = 4
@@ -50,10 +51,10 @@ def build_model(idx, dir):
nouter, ninner = 700, 10
hclose, rclose, relax = 1e-8, 1e-6, 0.97
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -128,16 +129,16 @@ def build_model(idx, dir):
mawstrt = 3.5
mawcondeqn = "THIEM"
mawngwfnodes = nlay
-    # <wellno> <radius> <bottom> <strt> <condeqn> <ngwfnodes> <aux>
+    # <ifno> <radius> <bottom> <strt> <condeqn> <ngwfnodes> <aux>
mawpackagedata = [
[0, mawradius, mawbottom, mawstrt, mawcondeqn, mawngwfnodes, mawdense]
]
-    # <wellno> <icon> <cellid(ncelldim)> <scrn_top> <scrn_bot> <hk_skin> <radius_skin>
+    # <ifno> <icon> <cellid(ncelldim)> <scrn_top> <scrn_bot> <hk_skin> <radius_skin>
mawconnectiondata = [
[0, icon, (icon, 0, 0), top, mawbottom, -999.0, -999.0]
for icon in range(nlay)
]
-    # <wellno> <mawsetting>
+    # <ifno> <mawsetting>
mawperioddata = [[0, "STATUS", "ACTIVE"]]
maw = flopy.mf6.ModflowGwfmaw(
gwf,
@@ -185,19 +186,17 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# calculate volume of water and make sure it is conserved
- gwfname = "gwf_" + sim.name
+ gwfname = "gwf_" + test.name
fname = gwfname + ".maw.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="HEAD")
stage = bobj.get_alldata().flatten()
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_alldata()
@@ -214,7 +213,6 @@ def eval_results(sim):
# calculate current volume of water in well and aquifer and compare with
# initial volume
for kstp, mawstage in enumerate(stage):
-
vgwf = 0
for k in range(nlay):
for j in range(ncol):
@@ -229,13 +227,13 @@ def eval_results(sim):
# compare the maw-gwf flows in maw budget file with the gwf-maw flows in
# gwf budget file. Values should be the same but reversed in sign
fname = gwfname + ".maw.bud"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
mbud = flopy.utils.CellBudgetFile(fname, precision="double")
maw_gwf = mbud.get_data(text="GWF")
fname = gwfname + ".cbc"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
gbud = flopy.utils.CellBudgetFile(fname, precision="double")
gwf_maw = gbud.get_data(text="MAW")
@@ -251,16 +249,13 @@ def eval_results(sim):
assert np.allclose(qmaw, -qgwf), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=0
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_buy_sfr01.py b/autotest/test_gwf_buy_sfr01.py
index 24f9bb12122..cf49f0d198b 100644
--- a/autotest/test_gwf_buy_sfr01.py
+++ b/autotest/test_gwf_buy_sfr01.py
@@ -1,19 +1,20 @@
-# Simple one-layer model with sfr on top. Purpose is to test buy package in a
-# one-d sfr network.
+"""
+Simple one-layer model with sfr on top. Purpose is to test buy package in a
+one-d sfr network.
+"""
import os
-import sys
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["buy_sfr_01"]
+cases = ["buy_sfr_01"]
-def build_model(idx, dir):
+def build_models(idx, test):
lx = 7.0
lz = 1.0
nlay = 1
@@ -44,10 +45,10 @@ def build_model(idx, dir):
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -372,16 +373,14 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# assign names
- gwtname = "gwt_" + sim.name
- gwfname = "gwf_" + sim.name
+ gwtname = "gwt_" + test.name
+ gwfname = "gwf_" + test.name
# load the sft concentrations and make sure all values are correct
fname = gwtname + ".sft.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
csftall = cobj.get_alldata()
@@ -391,21 +390,21 @@ def eval_results(sim):
# load the aquifer concentrations
fname = gwtname + ".ucn"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
cgwfall = cobj.get_alldata()
cgwf = cgwfall[-2].flatten()
# load the aquifer heads
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
hobj = flopy.utils.HeadFile(fname, text="HEAD")
headall = hobj.get_alldata()
head = headall[-1].flatten()
# load the sfr budget file and get sfr/gwf flows
fname = gwfname + ".sfr.bud"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.CellBudgetFile(fname, precision="double", verbose=False)
qsfrgwfsimall = bobj.get_data(text="GWF")
@@ -415,7 +414,7 @@ def eval_results(sim):
# load the sfr budget and check to make sure that concentrations are set
# correctly from sft concentrations
fname = gwfname + ".sfr.bud"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.CellBudgetFile(fname, precision="double", verbose=False)
b = bobj.get_data(text="AUXILIARY")
@@ -427,7 +426,7 @@ def eval_results(sim):
# load the sfr stage file
# load the aquifer concentrations and make sure all values are correct
fname = gwfname + ".sfr.stg"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
stgobj = flopy.utils.HeadFile(fname, text="STAGE")
stageall = stgobj.get_alldata()
stage = stageall[-1]
@@ -457,13 +456,13 @@ def eval_results(sim):
), f"reach {n} flow {qcalc} not equal {qsim}"
-@pytest.mark.parametrize("name", ex)
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=0
- ),
- str(function_tmpdir),
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_chd01.py b/autotest/test_gwf_chd01.py
index 0dabc3ac06d..0ee33dab664 100644
--- a/autotest/test_gwf_chd01.py
+++ b/autotest/test_gwf_chd01.py
@@ -3,15 +3,15 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = [
+cases = [
"chd01",
]
-def build_model(idx, dir):
+def build_models(idx, test):
nlay, nrow, ncol = 1, 1, 100
nper = 1
perlen = [5.0]
@@ -34,10 +34,10 @@ def build_model(idx, dir):
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -118,12 +118,10 @@ def build_model(idx, dir):
return sim, None
-def eval_model(sim):
- print("evaluating model...")
-
- gwfname = "gwf_" + sim.name
+def check_output(idx, test):
+ gwfname = "gwf_" + test.name
- fpth = os.path.join(sim.simpath, f"{gwfname}.hds")
+ fpth = os.path.join(test.workspace, f"{gwfname}.hds")
hobj = flopy.utils.HeadFile(fpth, precision="double")
head = hobj.get_data().flatten()
@@ -134,13 +132,13 @@ def eval_model(sim):
), "simulated head do not match with known solution."
-@pytest.mark.parametrize("name", ex)
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_model, idxsim=0
- ),
- str(function_tmpdir),
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_chd02.py b/autotest/test_gwf_chd02.py
index 019a00917d4..af30d11a1b1 100644
--- a/autotest/test_gwf_chd02.py
+++ b/autotest/test_gwf_chd02.py
@@ -4,18 +4,18 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = [
+cases = [
"chd02",
]
-def build_model(idx, workspace):
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
nlay, nrow, ncol = 1, 1, 10
- sim = flopy.mf6.MFSimulation(sim_ws=workspace, sim_name=name)
+ sim = flopy.mf6.MFSimulation(sim_ws=test.workspace, sim_name=name)
flopy.mf6.ModflowTdis(sim)
flopy.mf6.ModflowIms(sim, complexity="simple")
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, print_input=True)
@@ -38,8 +38,8 @@ def build_model(idx, workspace):
strt=10.0,
)
chd_data = [
- (0, 0, 0, 10.0, 1.0, 100.),
- (0, 0, ncol - 1, 5.0, 0.0, 100.),
+ (0, 0, 0, 10.0, 1.0, 100.0),
+ (0, 0, ncol - 1, 5.0, 0.0, 100.0),
]
chd_data = {
0: {
@@ -63,12 +63,10 @@ def build_model(idx, workspace):
return sim, None
-def eval_model(sim):
- print("evaluating model...")
-
- name = sim.name
+def check_output(idx, test):
+ name = test.name
- fpth = os.path.join(sim.simpath, f"{name}.hds")
+ fpth = os.path.join(test.workspace, f"{name}.hds")
hobj = flopy.utils.HeadFile(fpth, precision="double")
head = hobj.get_data().flatten()
@@ -92,13 +90,13 @@ def eval_model(sim):
), "simulated head does not match with known solution."
-@pytest.mark.parametrize("name", ex)
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_model, idxsim=0
- ),
- str(function_tmpdir),
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_db01_nr.py b/autotest/test_gwf_csub_db01_nr.py
index 58b16e09d05..7f96b7707c0 100644
--- a/autotest/test_gwf_csub_db01_nr.py
+++ b/autotest/test_gwf_csub_db01_nr.py
@@ -3,10 +3,10 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = (
+cases = (
"csub_db01a",
"csub_db01b",
"csub_db01c",
@@ -59,10 +59,10 @@
nlay, nrow, ncol = 2, 1, 2
nper = 3
tsp0 = 1.0
-perlen = [tsp0] + [365.2500000 for i in range(nper - 1)]
-nstp = [1] + [200 for i in range(nper - 1)]
-tsmult = [1.0] + [1.0 for i in range(nper - 1)]
-steady = [True] + [False for i in range(nper - 1)]
+perlen = [tsp0] + [365.2500000 for _ in range(nper - 1)]
+nstp = [1] + [200 for _ in range(nper - 1)]
+tsmult = [1.0] + [1.0 for _ in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
delr, delc = 1000.0, 1000.0
top = 0.0
botm = [-10.0, -20.0]
@@ -81,8 +81,8 @@
hclose, rclose, relax = 1e-9, 1e-3, 1.0
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# all cells are active
ib = 1
@@ -106,12 +106,12 @@
H0 = 0.0
-def build_model(idx, dir):
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
newton = newtons[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -323,18 +323,16 @@ def build_model(idx, dir):
return sim, None
-def eval_comp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -346,11 +344,11 @@ def eval_comp(sim):
d = np.recarray(nbud, dtype=dtype)
for key in bud_lst:
d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -363,13 +361,13 @@ def eval_comp(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
for idx, key in enumerate(bud_lst):
@@ -379,48 +377,41 @@ def eval_comp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol,
- idxsim=idx,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ htol=htol,
)
+ test.run()
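The check_output routine above cross-checks the listing-file budget against the cell-by-cell budget: for each time step it sums positive cell flows into an _IN total and negative cell flows into an _OUT total, then differences those totals against the Mf6ListBudget columns. A condensed sketch of that accumulation, assuming the budget term is written as a full 3-D array as in this test (the helper name and paths are illustrative):

    import flopy
    import numpy as np


    def cbc_totals(cbc_path, text):
        # sum positive (inflow) and negative (outflow) cell-by-cell flows per step
        cobj = flopy.utils.CellBudgetFile(cbc_path, precision="double")
        totals = []
        for kstpkper, totim in zip(cobj.get_kstpkper(), cobj.get_times()):
            v = cobj.get_data(kstpkper=kstpkper, text=text)[0].ravel()
            qin = v[v > 0.0].sum()
            qout = -v[v < 0.0].sum()
            totals.append((totim, qin, qout))
        return np.array(totals)

Each (qin, qout) pair is then compared with the corresponding "<TEXT>_IN" and "<TEXT>_OUT" columns returned by flopy.utils.Mf6ListBudget, and the maximum absolute difference is tested against budtol.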
diff --git a/autotest/test_gwf_csub_dbgeo01.py b/autotest/test_gwf_csub_dbgeo01.py
index c899557c3a8..2f49d4299ac 100644
--- a/autotest/test_gwf_csub_dbgeo01.py
+++ b/autotest/test_gwf_csub_dbgeo01.py
@@ -3,10 +3,10 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_dbgeo01a"]
+cases = ["csub_dbgeo01a"]
ndcell = [19]
strt = [0.0]
chdh = [0]
@@ -130,13 +130,13 @@
# temporal discretization
nper = 1
-perlen = [1000.0 for i in range(nper)]
-nstp = [100 for i in range(nper)]
-tsmult = [1.05 for i in range(nper)]
-steady = [False for i in range(nper)]
+perlen = [1000.0 for _ in range(nper)]
+nstp = [100 for _ in range(nper)]
+tsmult = [1.05 for _ in range(nper)]
+steady = [False for _ in range(nper)]
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
hnoflo = 1e30
hdry = -1e30
@@ -190,7 +190,7 @@ def calc_stress(sgm0, sgs0, h, bt):
return geo, es
-def build_model(idx, dir):
+def build_models(idx, test):
c6 = []
for j in range(0, ncol, 2):
c6.append([(0, 0, j), chdh[idx]])
@@ -199,10 +199,10 @@ def build_model(idx, dir):
geo, es = calc_stress(sgm, sgs, strt[idx], botm)
sub6 = [[0, (0, 0, 1), "delay", -1.0, thick, 1.0, cc, cr, theta, kv, 1.0]]
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -312,11 +312,9 @@ def build_model(idx, dir):
return sim, mc
-def eval_sub(sim):
- print("evaluating subsidence...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -333,36 +331,37 @@ def eval_sub(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'CSUB':>15s}"
- line += f" {'MF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc0[i]:15g}"
- line += f" {tc['TCOMP'][i]:15g}"
- line += f" {tc0[i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'CSUB':>15s}"
+ line += f" {'MF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc0[i]:15g}"
+ line += f" {tc['TCOMP'][i]:15g}"
+ line += f" {tc0[i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize("name", ex)
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(name=name, exe_dict=targets, exfunc=eval_sub, idxsim=0),
- str(function_tmpdir),
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_distypes.py b/autotest/test_gwf_csub_distypes.py
new file mode 100644
index 00000000000..dd29974a020
--- /dev/null
+++ b/autotest/test_gwf_csub_distypes.py
@@ -0,0 +1,435 @@
+import pathlib as pl
+
+import flopy
+import numpy as np
+import pytest
+from flopy.utils.gridgen import Gridgen
+
+from conftest import try_get_target
+from framework import TestFramework
+
+cases = ["csub_dis", "csub_disv", "csub_disu", "csub_disu01", "csub_disu02"]
+ex_dict = {name: None for name in cases}
+ex_dict["csub_disu01"] = 0
+ex_dict["csub_disu02"] = 2
+paktest = "csub"
+
+# temporal discretization
+nper = 2
+perlen = [1.0, 100.0]
+nstp = [1, 10]
+tsmult = [1.0] * nper
+tdis_rc = []
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
+
+# base spatial discretization
+nlay, nrow, ncol = 3, 9, 9
+refinement_level = 2
+nrow_refined, ncol_refined = nrow * refinement_level, ncol * refinement_level
+shape3d = (nlay, nrow, ncol)
+shape3d_refined = (nlay, nrow_refined, ncol_refined)
+shape2d = (nrow, ncol)
+shape2d_refined = (nrow_refined, ncol_refined)
+size3d = nlay * nrow * ncol
+size3d_refined = nlay * nrow_refined * ncol_refined
+size2d = nrow * ncol
+size2d_refined = nrow_refined * ncol_refined
+
+delr = delc = 1000.0
+top = 0.0
+bot = -100.0
+dz = (top - bot) / nlay
+botm = [top - k * dz for k in range(1, nlay + 1)]
+z_node = [z + 0.5 * dz for z in botm]
+
+delr_refined = delr / refinement_level
+delc_refined = delc / refinement_level
+
+hk = [1.0, 0.001, 1.0]
+sy = [0.25, 0.45, 0.25]
+ss = [5e-5, 5e-4, 5e-5]
+
+well_coordinates = (
+ np.array([(4.25, 4.25), (4.25, 4.75), (4.75, 4.75), (4.75, 4.25)]) * delr
+)
+wellq = -1000.0
+
+nouter, ninner = 100, 300
+dvclose, rclose, relax = 1e-6, 0.01, 0.97
+
+# subwt data
+cc = 0.25
+cr = 0.25
+void = 0.82
+theta = void / (1.0 + void)
+kv = 999.0
+sgm = 1.7
+sgs = 2.0
+
+beta = 0.0
+# beta = 4.65120000e-10
+gammaw = 9806.65000000
+
+
+def get_interbed(modelgrid):
+ grid_type = modelgrid.grid_type
+ ia = []
+ x0, x1, y0, y1 = modelgrid.extent
+ for k in range(1, nlay, 1):
+ cellid = modelgrid.intersect(x0 + 0.1, y1 - 0.1, z=z_node[k])
+ ia.append(get_node_number(modelgrid, cellid))
+
+ package_data = []
+ ifno = 0
+ ini_stress = 0.0
+
+ nodes = [node for node in range(ia[0], ia[1])]
+ if grid_type == "structured":
+ cellids = modelgrid.get_lrc(nodes)
+ elif grid_type == "vertex":
+ cellids = modelgrid.get_lni(nodes)
+ else:
+ cellids = [(node,) for node in nodes]
+
+ for cellid in cellids:
+ rnb = 1.0
+ vk = 999.0
+ package_data.append(
+ (
+ ifno,
+ cellid, # will need to be unpacked with *cellid - does not work for dis
+ "nodelay",
+ ini_stress,
+ modelgrid.cell_thickness[cellid],
+ rnb,
+ ss[1],
+ ss[1] * 1000.0,
+ theta,
+ vk,
+ top,
+ )
+ )
+ ifno += 1
+ return package_data
+
+
+def build_dis(gwf):
+ return flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ top=top,
+ botm=botm,
+ )
+
+
+def get_gridgen_ws(ws):
+ gridgen_ws = ws / "gridgen"
+ gridgen_ws.mkdir(parents=True, exist_ok=True)
+ return gridgen_ws
+
+
+def build_temp_gwf(ws):
+ gridgen_ws = get_gridgen_ws(ws)
+ gridgen_sim = flopy.mf6.MFSimulation(
+ sim_name="gridgen", sim_ws=gridgen_ws, exe_name="mf6"
+ )
+ gridgen_gwf = flopy.mf6.ModflowGwf(gridgen_sim, modelname="gridgen")
+ return gridgen_gwf
+
+
+def build_disv(ws, gwf, gridgen):
+ temp_gwf = build_temp_gwf(ws)
+ dis = build_dis(temp_gwf)
+ g = Gridgen(
+ temp_gwf.modelgrid,
+ model_ws=get_gridgen_ws(ws),
+ exe_name=gridgen,
+ )
+ g.build()
+ gridprops = g.get_gridprops_disv()
+ return flopy.mf6.ModflowGwfdisv(gwf, **gridprops)
+
+
+def build_disu(ws, gwf, refinement_layer, gridgen):
+ temp_gwf = build_temp_gwf(ws)
+ dis = build_dis(temp_gwf)
+ g = Gridgen(
+ temp_gwf.modelgrid,
+ model_ws=get_gridgen_ws(ws),
+ exe_name=gridgen,
+ )
+ if refinement_layer is not None:
+ x0, x1, y0, y1 = temp_gwf.modelgrid.extent
+ polys = [[[(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]]]
+ g.add_refinement_features(
+ polys,
+ "polygon",
+ 1,
+ layers=[refinement_layer],
+ )
+ g.build()
+ gridprops = g.get_gridprops_disu6()
+ return flopy.mf6.ModflowGwfdisu(gwf, **gridprops)
+
+
+def get_node_number(modelgrid, cellid):
+ if modelgrid.grid_type == "unstructured":
+ node = cellid
+ elif modelgrid.grid_type == "vertex":
+ node = modelgrid.ncpl * cellid[0] + cellid[1]
+ else:
+ node = (
+ modelgrid.nrow * modelgrid.ncol * cellid[0]
+ + modelgrid.ncol * cellid[1]
+ + cellid[2]
+ )
+ return node
+
+
+def build_3d_array(modelgrid, values, dtype=float):
+ if isinstance(values, dtype):
+ arr = np.full(modelgrid.nnodes, values, dtype=dtype)
+ else:
+ arr = np.zeros(modelgrid.nnodes, dtype=dtype)
+ ia = []
+ x0, x1, y0, y1 = modelgrid.extent
+ for k in range(nlay):
+ cellid = modelgrid.intersect(x0 + 0.1, y1 - 0.1, z=z_node[k])
+ ia.append(get_node_number(modelgrid, cellid))
+ ia.append(modelgrid.nnodes + 1)
+ for k in range(nlay):
+ arr[ia[k] : ia[k + 1]] = values[k]
+ return arr.reshape(modelgrid.shape)
+
+
+def build_well_data(modelgrid):
+ well_spd = []
+ for x, y in well_coordinates:
+ cellid = modelgrid.intersect(x, y, z=z_node[-1])
+ if isinstance(cellid, tuple):
+ well_spd.append((*cellid, wellq))
+ else:
+ well_spd.append((cellid, wellq))
+ return {1: well_spd}
+
+
+def build_models(idx, test):
+ gridgen = try_get_target(test.targets, "gridgen")
+ return build_mf6(idx, test.workspace, gridgen), None
+
+
+# build MODFLOW 6 files
+def build_mf6(idx, ws, gridgen):
+ name = cases[idx]
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name,
+ version="mf6",
+ exe_name="mf6",
+ sim_ws=ws,
+ )
+ # create tdis package
+ tdis = flopy.mf6.ModflowTdis(
+ sim,
+ time_units="DAYS",
+ nper=nper,
+ perioddata=tdis_rc,
+ )
+
+ # create iterative model solution and register the gwf model with it
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ print_option="SUMMARY",
+ outer_dvclose=dvclose,
+ outer_maximum=nouter,
+ under_relaxation="NONE",
+ inner_maximum=ninner,
+ inner_dvclose=dvclose,
+ rcloserecord=rclose,
+ linear_acceleration="CG",
+ scaling_method="NONE",
+ reordering_method="NONE",
+ relaxation_factor=relax,
+ )
+
+ # create gwf model
+ gwf = flopy.mf6.ModflowGwf(
+ sim,
+ modelname=name,
+ print_input=True,
+ save_flows=True,
+ )
+
+ if "disv" in name:
+ dis = build_disv(ws, gwf, gridgen)
+ elif "disu" in name:
+ dis = build_disu(ws, gwf, ex_dict[name], gridgen)
+ else:
+ dis = build_dis(gwf)
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(
+ gwf,
+ strt=top,
+ )
+
+ k11 = build_3d_array(gwf.modelgrid, hk)
+ icelltype = build_3d_array(gwf.modelgrid, [1, 0, 0], dtype=int)
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ icelltype=icelltype,
+ k=k11,
+ k33=k11,
+ )
+
+ # storage
+ sy_arr = build_3d_array(gwf.modelgrid, sy)
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ iconvert=icelltype,
+ ss=0.0,
+ sy=sy_arr,
+ transient={0: True},
+ )
+
+ # well
+ wel = flopy.mf6.ModflowGwfwel(
+ gwf,
+ stress_period_data=build_well_data(gwf.modelgrid),
+ save_flows=False,
+ )
+
+ # csub
+ cg_ske_cr = build_3d_array(gwf.modelgrid, ss)
+ packagedata = get_interbed(gwf.modelgrid)
+
+ csub = flopy.mf6.ModflowGwfcsub(
+ gwf,
+ zdisplacement_filerecord=f"{name}.csub.zdis.bin",
+ compaction_filerecord=f"{name}.csub.comp.bin",
+ ninterbeds=len(packagedata),
+ sgs=sgs,
+ sgm=sgm,
+ beta=beta,
+ gammaw=gammaw,
+ cg_ske_cr=cg_ske_cr,
+ cg_theta=theta,
+ packagedata=packagedata,
+ )
+ # orecarray = {}
+ # orecarray["csub_obs.csv"] = [
+ # ("wc01", "compaction-cell", (1, 5, 8)),
+ # ("wc02", "compaction-cell", (3, 6, 11)),
+ # ]
+ # csub_obs_package = csub.obs.initialize(
+ # filename=opth, digits=10, print_input=True, continuous=orecarray
+ # )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{name}.cbc",
+ head_filerecord=f"{name}.hds",
+ headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ )
+ return sim
+
+
+def check_output(idx, test):
+ name = cases[idx]
+ ws = pl.Path(test.workspace)
+ test = flopy.mf6.MFSimulation.load(sim_name=name, sim_ws=ws)
+ gwf = test.get_model()
+ x0, x1, y0, y1 = gwf.modelgrid.extent
+
+ comp_obj = flopy.utils.HeadFile(
+ ws / f"{name}.csub.comp.bin",
+ text="CSUB-COMPACTION",
+ precision="double",
+ )
+ zdis_obj = flopy.utils.HeadFile(
+ ws / f"{name}.csub.zdis.bin",
+ text="CSUB-ZDISPLACE",
+ precision="double",
+ )
+
+ layer_refinement = ex_dict[name]
+
+ # create a reusable mapping dictionary so it can be used for all time steps
+ # with refined disu grids - which do not have naturally ordered node
+ # numbers in refined layers
+ map_dict = {}
+ if layer_refinement is not None:
+ z = z_node[layer_refinement]
+ icnt = 0
+ for i in range(nrow_refined):
+ y = y1 - delc_refined * (i + 0.5)
+ for j in range(ncol_refined):
+ x = x0 + delr_refined * (j + 0.5)
+ node = gwf.modelgrid.intersect(x, y, z=z)
+ map_dict[icnt] = {"node": node, "cellid": (i, j)}
+ icnt += 1
+
+ for totim in comp_obj.get_times():
+ if layer_refinement is None:
+ comp = comp_obj.get_data(totim=totim).flatten().reshape(shape3d)
+ zdis = zdis_obj.get_data(totim=totim).flatten().reshape(shape3d)
+ else:
+ comp1d = comp_obj.get_data(totim=totim).squeeze()
+ zdis1d = zdis_obj.get_data(totim=totim).squeeze()
+ ia = [0]
+ for k in range(nlay):
+ if k == layer_refinement:
+ ia.append(ia[k] + size2d_refined)
+ else:
+ ia.append(ia[k] + size2d)
+
+ comp = np.zeros(shape3d, dtype=float)
+ zdis = np.zeros(shape3d, dtype=float)
+ for k in range(nlay):
+ ia0 = ia[k]
+ ia1 = ia[k + 1]
+ comp_slice = comp1d[ia0:ia1].copy()
+ zdis_slice = zdis1d[ia0:ia1].copy()
+ if k == layer_refinement:
+ comp_temp = np.zeros(shape2d_refined, dtype=float)
+ zdis_temp = np.zeros(shape2d_refined, dtype=float)
+ for value in map_dict.values():
+ comp_temp[value["cellid"]] = comp1d[value["node"]]
+ zdis_temp[value["cellid"]] = zdis1d[value["node"]]
+ comp[k] = comp_temp.reshape(
+ nrow_refined // 2, 2, ncol_refined // 2, 2
+ ).mean(axis=(1, -1))
+ zdis[k] = zdis_temp.reshape(
+ nrow_refined // 2, 2, ncol_refined // 2, 2
+ ).mean(axis=(1, -1))
+ else:
+ comp[k] = comp_slice.reshape(shape2d)
+ zdis[k] = zdis_slice.reshape(shape2d)
+
+ comp = comp.sum(axis=0)
+ zdis = zdis[0]
+ assert np.allclose(comp, zdis), (
+ "sum of compaction is not equal to the "
+ + f"z-displacement at time {totim}"
+ )
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ )
+ test.run()
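In check_output above, the refined layer is mapped back onto the base grid by averaging each 2x2 block of refined cells (refinement_level is 2). The reshape-and-mean idiom does the block averaging in one step; a standalone numpy illustration with made-up values:

    import numpy as np

    nrow_refined, ncol_refined = 18, 18  # 9x9 base grid refined by a factor of 2
    fine = np.arange(nrow_refined * ncol_refined, dtype=float).reshape(
        nrow_refined, ncol_refined
    )
    # group the refined cells into 2x2 blocks and average each block
    coarse = fine.reshape(nrow_refined // 2, 2, ncol_refined // 2, 2).mean(axis=(1, -1))
    assert coarse.shape == (9, 9)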
diff --git a/autotest/test_gwf_csub_inelastic.py b/autotest/test_gwf_csub_inelastic.py
index 9d4e40575b7..dd5546092f2 100644
--- a/autotest/test_gwf_csub_inelastic.py
+++ b/autotest/test_gwf_csub_inelastic.py
@@ -3,12 +3,12 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
paktest = "csub"
budtol = 1e-2
-ex = ["csub_de01a"]
+cases = ["csub_de01a"]
# static model data
# spatial discretization
@@ -75,7 +75,7 @@
def build_mf6(idx, ws, update=None):
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -185,16 +185,10 @@ def build_mf6(idx, ws, update=None):
return sim
-def build_model(idx, dir):
- name = ex[idx]
- ws = dir
-
- # build MODFLOW 6 files
- sim = build_mf6(idx, ws)
-
- ws = os.path.join(dir, "mf6")
- mc = build_mf6(idx, ws, update=True)
-
+def build_models(idx, test):
+ name = cases[idx]
+ sim = build_mf6(idx, test.workspace)
+ mc = build_mf6(idx, os.path.join(test.workspace, "mf6"), update=True)
return sim, mc
@@ -208,13 +202,11 @@ def calc_void(theta):
return theta / (1.0 - theta)
-def eval_void(sim):
- print("evaluating void ratio...")
-
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+def check_output(idx, test):
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
cd = np.genfromtxt(fpth, delimiter=",", names=True)
- fpth = os.path.join(sim.simpath, "mf6", "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "mf6", "csub_obs.csv")
cd2 = np.genfromtxt(fpth, delimiter=",", names=True)
v = calc_comp2void(cd["COMP"])
@@ -228,42 +220,38 @@ def eval_void(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'VOID':>15s}"
- line += f" {'MF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{cd['time'][i]:15g}"
- line += f" {v[i]:15g}"
- line += f" {v[i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'VOID':>15s}"
+ line += f" {'MF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{cd['time'][i]:15g}"
+ line += f" {v[i]:15g}"
+ line += f" {v[i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_void, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
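The calc_void conversion above (and the theta = void / (1.0 + void) definitions used in the other csub tests) is the standard porosity/void-ratio relation: e = theta / (1 - theta) and theta = e / (1 + e) are inverses of each other. A quick numerical check:

    void = 0.82
    theta = void / (1.0 + void)  # porosity from void ratio
    assert abs(theta / (1.0 - theta) - void) < 1e-12  # void ratio recovered from porosity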
diff --git a/autotest/test_gwf_csub_ndb01_nr.py b/autotest/test_gwf_csub_ndb01_nr.py
index 0360ca612ea..8923da61787 100644
--- a/autotest/test_gwf_csub_ndb01_nr.py
+++ b/autotest/test_gwf_csub_ndb01_nr.py
@@ -3,11 +3,10 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from modflow_devtools.misc import is_in_ci
-from simulation import TestSimulation
-ex = (
+cases = (
"csub_ndb01a",
"csub_ndb01b",
"csub_ndb01c",
@@ -60,10 +59,10 @@
nlay, nrow, ncol = 2, 1, 2
nper = 3
tsp0 = 1.0
-perlen = [tsp0] + [365.2500000 for i in range(nper - 1)]
-nstp = [1] + [200 for i in range(nper - 1)]
-tsmult = [1.0] + [1.0 for i in range(nper - 1)]
-steady = [True] + [False for i in range(nper - 1)]
+perlen = [tsp0] + [365.2500000 for _ in range(nper - 1)]
+nstp = [1] + [200 for _ in range(nper - 1)]
+tsmult = [1.0] + [1.0 for _ in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
delr, delc = 1000.0, 1000.0
top = 0.0
botm = [-10.0, -20.0]
@@ -82,8 +81,8 @@
hclose, rclose, relax = 1e-9, 1e-3, 1.0
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# all cells are active
ib = 1
@@ -107,12 +106,12 @@
H0 = 0.0
-def build_model(idx, dir):
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
newton = newtons[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -296,18 +295,16 @@ def build_model(idx, dir):
return sim, None
-def eval_comp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -319,11 +316,11 @@ def eval_comp(sim):
d = np.recarray(nbud, dtype=dtype)
for key in bud_lst:
d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -336,64 +333,57 @@ def eval_comp(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol,
- idxsim=idx,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ htol=htol,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_sk01.py b/autotest/test_gwf_csub_sk01.py
index ed222a07720..9d993088617 100644
--- a/autotest/test_gwf_csub_sk01.py
+++ b/autotest/test_gwf_csub_sk01.py
@@ -1,488 +1,461 @@
import os
-from typing import List, NamedTuple
import flopy
import numpy as np
+import pytest
+
from framework import TestFramework
-from pytest_cases import parametrize, parametrize_with_cases
-from simulation import TestSimulation
-
-
-class GwfCsubSkCases:
- dtol: float = 1e-3
- budtol: float = 0.01
- bud_lst: List[str] = [
- "CSUB-CGELASTIC_IN",
- "CSUB-CGELASTIC_OUT",
- "CSUB-WATERCOMP_IN",
- "CSUB-WATERCOMP_OUT",
+
+simname = "gwfcsubsk01"
+cases = [f"{simname}a", f"{simname}b", f"{simname}c"]
+dtol = 1e-3
+budtol = 0.01
+bud_lst = [
+ "CSUB-CGELASTIC_IN",
+ "CSUB-CGELASTIC_OUT",
+ "CSUB-WATERCOMP_IN",
+ "CSUB-WATERCOMP_OUT",
+]
+cvopt = [None, None, None]
+constantcv = [True, True, True]
+ndelaybeds = [0, 0, 0]
+top = [0, 0, 15]
+newton = [False, True, True]
+htol = [None, None, 0.3]
+
+
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
+ cmp = get_model(idx, test.workspace / "mf6_regression")
+ return sim, cmp
+
+
+def get_model(idx, workspace):
+ name = cases[idx]
+ newtonoptions = None
+ imsla = "CG"
+ if newton[idx]:
+ newtonoptions = "NEWTON"
+ imsla = "BICGSTAB"
+
+ # static model data
+ nlay, nrow, ncol = 3, 10, 10
+ nper = 31
+ perlen = [1.0] + [365.2500000 for i in range(nper - 1)]
+ nstp = [1] + [6 for i in range(nper - 1)]
+ tsmult = [1.0] + [1.3 for i in range(nper - 1)]
+ steady = [True] + [False for i in range(nper - 1)]
+ delr, delc = 1000.0, 2000.0
+ botm = [-100, -150.0, -350.0]
+ zthick = [top[idx] - botm[0], botm[0] - botm[1], botm[1] - botm[2]]
+ strt = 100.0
+ hnoflo = 1e30
+ hdry = -1e30
+
+ # calculate hk
+ hk1fact = 1.0 / zthick[1]
+ hk1 = np.ones((nrow, ncol), dtype=float) * 0.5 * hk1fact
+ hk1[0, :] = 1000.0 * hk1fact
+ hk1[-1, :] = 1000.0 * hk1fact
+ hk1[:, 0] = 1000.0 * hk1fact
+ hk1[:, -1] = 1000.0 * hk1fact
+ hk = [20.0, hk1, 5.0]
+
+ # calculate vka
+ vka = [1e6, 7.5e-5, 1e6]
+
+ # set rest of npf variables
+ laytyp = [1, 0, 0]
+ laytypu = [4, 0, 0]
+ sy = 0.0 # [0.1, 0., 0.]
+
+ nouter, ninner = 500, 300
+ hclose, rclose, relax = 1e-9, 1e-6, 1.0
+
+ tdis_rc = []
+ for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
+
+ # all cells are active
+ ib = 1
+
+ # chd data
+ c = []
+ c6 = []
+ ccol = [3, 4, 5, 6]
+ for j in ccol:
+ c.append([0, nrow - 1, j, strt, strt])
+ c6.append([(0, nrow - 1, j), strt])
+ cd = {0: c}
+ cd6 = {0: c6}
+ maxchd = len(cd[0])
+
+ # pumping well data
+ wr = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3]
+ wc = [0, 1, 8, 9, 0, 9, 0, 9, 0, 0]
+ wrp = [2, 2, 3, 3]
+ wcp = [5, 6, 5, 6]
+ wq = [-14000.0, -8000.0, -5000.0, -3000.0]
+ d = []
+ d6 = []
+ for r, c, q in zip(wrp, wcp, wq):
+ d.append([2, r, c, q])
+ d6.append([(2, r, c), q])
+ wd = {1: d}
+ wd6 = {1: d6}
+ maxwel = len(wd[1])
+
+ # recharge data
+ q = 3000.0 / (delr * delc)
+ v = np.zeros((nrow, ncol), dtype=float)
+ for r, c in zip(wr, wc):
+ v[r, c] = q
+ rech = {0: v}
+
+ # static ibc and sub data
+ sgm = 0.0
+ sgs = 0.0
+ omega = 1.0
+ void = 0.82
+ theta = void / (1.0 + void)
+ sw = 4.65120000e-10 * 9806.65000000 * theta
+
+ # no delay bed data
+ nndb = 3
+ lnd = [0, 1, 2]
+ hc = [botm[-1] for k in range(nlay)]
+ thicknd0 = [zthick[0], zthick[1], zthick[2]]
+ ccnd0 = [6e-6, 3e-6, 6e-6]
+ crnd0 = [6e-6, 3e-6, 6e-6]
+ sfv = []
+ sfe = []
+ for k in range(nlay):
+ sfv.append(ccnd0[k] * thicknd0[k])
+ sfe.append(crnd0[k] * thicknd0[k])
+
+ # sub output data
+ ds15 = [0, 0, 0, 2052, 0, 0, 0, 0, 0, 0, 0, 0]
+ ds16 = [
+ 0,
+ nper - 1,
+ 0,
+ nstp[-1] - 1,
+ 0,
+ 0,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
]
- class Data(NamedTuple):
- name: str
- cvopt: str
- constantcv: bool
- ndelaybeds: int
- top: float
- newton: bool
- htol: float
-
- @parametrize(
- data=[
- Data("a", None, True, 0, 0, False, None),
- Data("b", None, True, 0, 0, True, None),
- Data("c", None, True, 0, 15, True, 0.3),
- ]
+ # build MODFLOW 6 files
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name,
+ version="mf6",
+ exe_name="mf6",
+ sim_ws=str(workspace),
+ )
+ # create tdis package
+ tdis = flopy.mf6.ModflowTdis(
+ sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
+ )
+
+ # create iterative model solution
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ print_option="SUMMARY",
+ outer_dvclose=hclose,
+ outer_maximum=nouter,
+ under_relaxation="NONE",
+ inner_maximum=ninner,
+ inner_dvclose=hclose,
+ rcloserecord=rclose,
+ linear_acceleration=imsla,
+ scaling_method="NONE",
+ reordering_method="NONE",
+ relaxation_factor=relax,
+ )
+
+ # create gwf model
+ gwf = flopy.mf6.ModflowGwf(
+ sim, modelname=name, newtonoptions=newtonoptions
+ )
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ top=top[idx],
+ botm=botm,
+ filename=f"{name}.dis",
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename=f"{name}.ic")
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_flows=False,
+ # dev_modflowusg_upstream_weighted_saturation=True,
+ icelltype=laytyp,
+ cvoptions=cvopt[idx],
+ k=hk,
+ k33=vka,
+ )
+ # storage
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ save_flows=False,
+ iconvert=laytyp,
+ ss=0.0,
+ sy=sy,
+ storagecoefficient=True,
+ steady_state={0: True},
+ transient={1: True},
+ )
+
+ # recharge
+ rch = flopy.mf6.ModflowGwfrcha(gwf, readasarrays=True, recharge=rech)
+
+ # wel file
+ wel = flopy.mf6.ModflowGwfwel(
+ gwf,
+ print_input=True,
+ print_flows=True,
+ maxbound=maxwel,
+ stress_period_data=wd6,
+ save_flows=False,
+ )
+
+ # chd files
+ chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(
+ gwf, maxbound=maxchd, stress_period_data=cd6, save_flows=False
+ )
+ # csub files
+ opth = f"{name}.csub.obs"
+ csub = flopy.mf6.ModflowGwfcsub(
+ gwf,
+ head_based=True,
+ save_flows=True,
+ ninterbeds=0,
+ cg_theta=theta,
+ cg_ske_cr=crnd0,
+ packagedata=None,
)
- def case_generator(self, function_tmpdir, data):
- sim = self.get_model(data, function_tmpdir)
- cmp = self.get_model(data, function_tmpdir / "mf6_regression")
- return data, sim, cmp, self.eval_case
-
- def get_model(self, data, function_tmpdir):
- name = data.name
- newton = data.newton
- newtonoptions = None
- imsla = "CG"
- if newton:
- newtonoptions = "NEWTON"
- imsla = "BICGSTAB"
-
- # static model data
- nlay, nrow, ncol = 3, 10, 10
- nper = 31
- perlen = [1.0] + [365.2500000 for i in range(nper - 1)]
- nstp = [1] + [6 for i in range(nper - 1)]
- tsmult = [1.0] + [1.3 for i in range(nper - 1)]
- steady = [True] + [False for i in range(nper - 1)]
- delr, delc = 1000.0, 2000.0
- top = 0.0
- botm = [-100, -150.0, -350.0]
- zthick = [top - botm[0], botm[0] - botm[1], botm[1] - botm[2]]
- strt = 100.0
- hnoflo = 1e30
- hdry = -1e30
-
- # calculate hk
- hk1fact = 1.0 / zthick[1]
- hk1 = np.ones((nrow, ncol), dtype=float) * 0.5 * hk1fact
- hk1[0, :] = 1000.0 * hk1fact
- hk1[-1, :] = 1000.0 * hk1fact
- hk1[:, 0] = 1000.0 * hk1fact
- hk1[:, -1] = 1000.0 * hk1fact
- hk = [20.0, hk1, 5.0]
-
- # calculate vka
- vka = [1e6, 7.5e-5, 1e6]
-
- # set rest of npf variables
- laytyp = [1, 0, 0]
- laytypu = [4, 0, 0]
- sy = 0.0 # [0.1, 0., 0.]
-
- nouter, ninner = 500, 300
- hclose, rclose, relax = 1e-9, 1e-6, 1.0
-
- tdis_rc = []
- for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
-
- # all cells are active
- ib = 1
-
- # chd data
- c = []
- c6 = []
- ccol = [3, 4, 5, 6]
- for j in ccol:
- c.append([0, nrow - 1, j, strt, strt])
- c6.append([(0, nrow - 1, j), strt])
- cd = {0: c}
- cd6 = {0: c6}
- maxchd = len(cd[0])
-
- # pumping well data
- wr = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3]
- wc = [0, 1, 8, 9, 0, 9, 0, 9, 0, 0]
- wrp = [2, 2, 3, 3]
- wcp = [5, 6, 5, 6]
- wq = [-14000.0, -8000.0, -5000.0, -3000.0]
- d = []
- d6 = []
- for r, c, q in zip(wrp, wcp, wq):
- d.append([2, r, c, q])
- d6.append([(2, r, c), q])
- wd = {1: d}
- wd6 = {1: d6}
- maxwel = len(wd[1])
-
- # recharge data
- q = 3000.0 / (delr * delc)
- v = np.zeros((nrow, ncol), dtype=float)
- for r, c in zip(wr, wc):
- v[r, c] = q
- rech = {0: v}
-
- # static ibc and sub data
- sgm = 0.0
- sgs = 0.0
- omega = 1.0
- void = 0.82
- theta = void / (1.0 + void)
- sw = 4.65120000e-10 * 9806.65000000 * theta
-
- # no delay bed data
- nndb = 3
- lnd = [0, 1, 2]
- hc = [botm[-1] for k in range(nlay)]
- thicknd0 = [zthick[0], zthick[1], zthick[2]]
- ccnd0 = [6e-6, 3e-6, 6e-6]
- crnd0 = [6e-6, 3e-6, 6e-6]
- sfv = []
- sfe = []
- for k in range(nlay):
- sfv.append(ccnd0[k] * thicknd0[k])
- sfe.append(crnd0[k] * thicknd0[k])
-
- # sub output data
- ds15 = [0, 0, 0, 2052, 0, 0, 0, 0, 0, 0, 0, 0]
- ds16 = [
- 0,
- nper - 1,
- 0,
- nstp[-1] - 1,
- 0,
- 0,
- 1,
- 1,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 1,
- ]
-
- # build MODFLOW 6 files
- sim = flopy.mf6.MFSimulation(
- sim_name=name,
- version="mf6",
- exe_name="mf6",
- sim_ws=str(function_tmpdir),
- )
- # create tdis package
- tdis = flopy.mf6.ModflowTdis(
- sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
- )
-
- # create iterative model solution
- ims = flopy.mf6.ModflowIms(
- sim,
- print_option="SUMMARY",
- outer_dvclose=hclose,
- outer_maximum=nouter,
- under_relaxation="NONE",
- inner_maximum=ninner,
- inner_dvclose=hclose,
- rcloserecord=rclose,
- linear_acceleration=imsla,
- scaling_method="NONE",
- reordering_method="NONE",
- relaxation_factor=relax,
- )
-
- # create gwf model
- gwf = flopy.mf6.ModflowGwf(
- sim, modelname=name, newtonoptions=newtonoptions
- )
-
- dis = flopy.mf6.ModflowGwfdis(
- gwf,
- nlay=nlay,
- nrow=nrow,
- ncol=ncol,
- delr=delr,
- delc=delc,
- top=data.top,
- botm=botm,
- filename=f"{name}.dis",
- )
-
- # initial conditions
- ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename=f"{name}.ic")
-
- # node property flow
- npf = flopy.mf6.ModflowGwfnpf(
- gwf,
- save_flows=False,
- # dev_modflowusg_upstream_weighted_saturation=True,
- icelltype=laytyp,
- cvoptions=data.cvopt,
- k=hk,
- k33=vka,
- )
- # storage
- sto = flopy.mf6.ModflowGwfsto(
- gwf,
- save_flows=False,
- iconvert=laytyp,
- ss=0.0,
- sy=sy,
- storagecoefficient=True,
- steady_state={0: True},
- transient={1: True},
- )
-
- # recharge
- rch = flopy.mf6.ModflowGwfrcha(gwf, readasarrays=True, recharge=rech)
-
- # wel file
- wel = flopy.mf6.ModflowGwfwel(
- gwf,
- print_input=True,
- print_flows=True,
- maxbound=maxwel,
- stress_period_data=wd6,
- save_flows=False,
- )
-
- # chd files
- chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(
- gwf, maxbound=maxchd, stress_period_data=cd6, save_flows=False
- )
- # csub files
- opth = f"{name}.csub.obs"
- csub = flopy.mf6.ModflowGwfcsub(
- gwf,
- head_based=True,
- save_flows=True,
- ninterbeds=0,
- cg_theta=theta,
- cg_ske_cr=crnd0,
- packagedata=None,
- )
- obspos = [(0, 4, 4), (1, 4, 4), (2, 4, 4)]
- obstype = ["compaction-cell", "csub-cell"]
- obstag = ["tcomp", "csub"]
- obsarr = []
- for iobs, cobs in enumerate(obstype):
- for jobs, otup in enumerate(obspos):
- otag = f"{obstag[iobs]}{jobs + 1}"
- obsarr.append((otag, cobs, otup))
-
- obsarr2 = []
- obstype2 = [
- "csub",
- "inelastic-csub",
- "elastic-csub",
- "sk",
- "ske",
- "thickness",
- "theta",
- "interbed-compaction",
- "inelastic-compaction",
- "elastic-compaction",
- "delay-flowtop",
- "delay-flowbot",
- ]
- iobs = 0
- for cobs in obstype2:
- iobs += 1
- otag = f"obs{iobs:03d}"
- obsarr2.append((otag, cobs, (0,)))
-
- obstype3 = [
- "delay-preconstress",
- "delay-head",
- "delay-gstress",
- "delay-estress",
- "delay-compaction",
- "delay-thickness",
- "delay-theta",
- ]
- for cobs in obstype3:
- iobs += 1
- otag = f"obs{iobs:03d}"
- obsarr2.append((otag, cobs, (0,), (0,)))
-
- obsarr3 = []
- obstype4 = [
- "gstress-cell",
- "estress-cell",
- "thickness-cell",
- "coarse-csub",
- "wcomp-csub-cell",
- "coarse-compaction",
- "coarse-theta",
- "coarse-thickness",
- "csub-cell",
- "ske-cell",
- "sk-cell",
- "theta-cell",
- "compaction-cell",
- ]
- for cobs in obstype4:
- iobs += 1
- otag = f"obs{iobs:03d}"
- obsarr3.append((otag, cobs, obspos[-1]))
-
- orecarray = {}
- orecarray["csub_obs.csv"] = obsarr
- orecarray["interbed_obs.csv"] = obsarr2
- orecarray["coarse_cell_obs.csv"] = obsarr3
-
- csub_obs_package = csub.obs.initialize(
- filename=opth, digits=10, print_input=True, continuous=orecarray
- )
-
- # output control
- oc = flopy.mf6.ModflowGwfoc(
- gwf,
- budget_filerecord=f"{name}.cbc",
- head_filerecord=f"{name}.hds",
- headprintrecord=[
- ("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
- ],
- saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
- printrecord=[("HEAD", "LAST"), ("BUDGET", "ALL")],
- )
-
- return sim
-
- def eval_case(self, sim, data):
- print("evaluating compaction...")
-
- # MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
- tc = np.genfromtxt(fpth, names=True, delimiter=",")
-
- # regression compaction results
- cpth = "mf6_regression"
- fpth = os.path.join(sim.simpath, cpth, "csub_obs.csv")
- tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
-
- # calculate maximum absolute error
- diff = tc["TCOMP3"] - tc0["TCOMP3"]
- diffmax = np.abs(diff).max()
- msg = f"maximum absolute total-compaction difference ({diffmax}) "
-
- # write summary
- fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
- )
- f = open(fpth, "w")
+ obspos = [(0, 4, 4), (1, 4, 4), (2, 4, 4)]
+ obstype = ["compaction-cell", "csub-cell"]
+ obstag = ["tcomp", "csub"]
+ obsarr = []
+ for iobs, cobs in enumerate(obstype):
+ for jobs, otup in enumerate(obspos):
+ otag = f"{obstag[iobs]}{jobs + 1}"
+ obsarr.append((otag, cobs, otup))
+
+ obsarr2 = []
+ obstype2 = [
+ "csub",
+ "inelastic-csub",
+ "elastic-csub",
+ "sk",
+ "ske",
+ "thickness",
+ "theta",
+ "interbed-compaction",
+ "inelastic-compaction",
+ "elastic-compaction",
+ "delay-flowtop",
+ "delay-flowbot",
+ ]
+ iobs = 0
+ for cobs in obstype2:
+ iobs += 1
+ otag = f"obs{iobs:03d}"
+ obsarr2.append((otag, cobs, (0,)))
+
+ obstype3 = [
+ "delay-preconstress",
+ "delay-head",
+ "delay-gstress",
+ "delay-estress",
+ "delay-compaction",
+ "delay-thickness",
+ "delay-theta",
+ ]
+ for cobs in obstype3:
+ iobs += 1
+ otag = f"obs{iobs:03d}"
+ obsarr2.append((otag, cobs, (0,), (0,)))
+
+ obsarr3 = []
+ obstype4 = [
+ "gstress-cell",
+ "estress-cell",
+ "thickness-cell",
+ "coarse-csub",
+ "wcomp-csub-cell",
+ "coarse-compaction",
+ "coarse-theta",
+ "coarse-thickness",
+ "csub-cell",
+ "ske-cell",
+ "sk-cell",
+ "theta-cell",
+ "compaction-cell",
+ ]
+ for cobs in obstype4:
+ iobs += 1
+ otag = f"obs{iobs:03d}"
+ obsarr3.append((otag, cobs, obspos[-1]))
+
+ orecarray = {}
+ orecarray["csub_obs.csv"] = obsarr
+ orecarray["interbed_obs.csv"] = obsarr2
+ orecarray["coarse_cell_obs.csv"] = obsarr3
+
+ csub_obs_package = csub.obs.initialize(
+ filename=opth, digits=10, print_input=True, continuous=orecarray
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{name}.cbc",
+ head_filerecord=f"{name}.hds",
+ headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ printrecord=[("HEAD", "LAST"), ("BUDGET", "ALL")],
+ )
+
+ return sim
+
+
+def check_output(idx, test):
+ # MODFLOW 6 total compaction results
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
+ tc = np.genfromtxt(fpth, names=True, delimiter=",")
+
+ # regression compaction results
+ cpth = "mf6_regression"
+ fpth = os.path.join(test.workspace, cpth, "csub_obs.csv")
+ tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
+
+ # calculate maximum absolute error
+ diff = tc["TCOMP3"] - tc0["TCOMP3"]
+ diffmax = np.abs(diff).max()
+ msg = f"maximum absolute total-compaction difference ({diffmax}) "
+
+ # write summary
+ fpth = os.path.join(
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
+ )
+ with open(fpth, "w") as f:
for i in range(diff.shape[0]):
line = f"{tc0['time'][i]:10.2g}"
line += f"{tc['TCOMP3'][i]:10.2g}"
line += f"{tc0['TCOMP3'][i]:10.2g}"
line += f"{diff[i]:10.2g}"
f.write(line + "\n")
- f.close()
-
- if diffmax > self.dtol:
- sim.success = False
- msg += f"exceeds {self.dtol}"
- assert diffmax < self.dtol, msg
- else:
- sim.success = True
- print(" " + msg)
-
- # get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
- budl = flopy.utils.Mf6ListBudget(fpth)
- names = list(self.bud_lst)
- d0 = budl.get_budget(names=names)[0]
- dtype = d0.dtype
- nbud = d0.shape[0]
-
- # get results from cbc file
- cbc_bud = ["CSUB-CGELASTIC", "CSUB-WATERCOMP"]
- d = np.recarray(nbud, dtype=dtype)
- for key in self.bud_lst:
- d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
- cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
- kk = cobj.get_kstpkper()
- times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
- for text in cbc_bud:
- qin = 0.0
- qout = 0.0
- v = cobj.get_data(kstpkper=k, text=text)[0]
- for kk in range(v.shape[0]):
- for ii in range(v.shape[1]):
- for jj in range(v.shape[2]):
- vv = v[kk, ii, jj]
- if vv < 0.0:
- qout -= vv
- else:
- qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
- d["stress_period"] = k[1]
- key = f"{text}_IN"
- d[key][idx] = qin
- key = f"{text}_OUT"
- d[key][idx] = qout
-
- diff = np.zeros((nbud, len(self.bud_lst)), dtype=float)
- for idx, key in enumerate(self.bud_lst):
- diff[:, idx] = d0[key] - d[key]
- diffmax = np.abs(diff).max()
- msg = f"maximum absolute total-budget difference ({diffmax}) "
-
- # write summary
- fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
- )
- f = open(fpth, "w")
+
+ if diffmax > dtol:
+ test.success = False
+ msg += f"exceeds {dtol}"
+ assert diffmax < dtol, msg
+ else:
+ test.success = True
+ print(" " + msg)
+
+ # get results from listing file
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
+ budl = flopy.utils.Mf6ListBudget(fpth)
+ names = list(bud_lst)
+ d0 = budl.get_budget(names=names)[0]
+ dtype = d0.dtype
+ nbud = d0.shape[0]
+
+ # get results from cbc file
+ cbc_bud = ["CSUB-CGELASTIC", "CSUB-WATERCOMP"]
+ d = np.recarray(nbud, dtype=dtype)
+ for key in bud_lst:
+ d[key] = 0.0
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
+ cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
+ kk = cobj.get_kstpkper()
+ times = cobj.get_times()
+ for i, (k, t) in enumerate(zip(kk, times)):
+ for text in cbc_bud:
+ qin = 0.0
+ qout = 0.0
+ v = cobj.get_data(kstpkper=k, text=text)[0]
+ for kk in range(v.shape[0]):
+ for ii in range(v.shape[1]):
+ for jj in range(v.shape[2]):
+ vv = v[kk, ii, jj]
+ if vv < 0.0:
+ qout -= vv
+ else:
+ qin += vv
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
+ d["stress_period"] = k[1]
+ key = f"{text}_IN"
+ d[key][i] = qin
+ key = f"{text}_OUT"
+ d[key][i] = qout
+
+ diff = np.zeros((nbud, len(bud_lst)), dtype=float)
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
+ diffmax = np.abs(diff).max()
+ msg = f"maximum absolute total-budget difference ({diffmax}) "
+
+ # write summary
+ fpth = os.path.join(
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
+ )
+ with open(fpth, "w") as f:
for i in range(diff.shape[0]):
if i == 0:
line = f"{'TIME':>10s}"
- for idx, key in enumerate(self.bud_lst):
+ for j, key in enumerate(bud_lst):
line += f"{key + '_LST':>25s}"
line += f"{key + '_CBC':>25s}"
line += f"{key + '_DIF':>25s}"
f.write(line + "\n")
line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(self.bud_lst):
+ for j, key in enumerate(bud_lst):
line += f"{d0[key][i]:25g}"
line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
+ line += f"{diff[i, j]:25g}"
f.write(line + "\n")
- f.close()
-
- if diffmax > self.budtol:
- sim.success = False
- msg += f"exceeds {self.dtol}"
- assert diffmax < self.dtol, msg
- else:
- sim.success = True
- print(" " + msg)
-
-
-@parametrize_with_cases(
- "case",
- cases=[
- GwfCsubSkCases,
- ],
-)
-def test_mf6model(case, targets):
- data, sim, cmp, evl = case
- sim.write_simulation()
- if cmp:
- cmp.write_simulation()
- test = TestSimulation(
- name=data.name,
- exe_dict=targets,
- exfunc=evl,
- idxsim=0,
- mf6_regression=True,
+
+ if diffmax > budtol:
+ test.success = False
+ msg += f"exceeds {dtol}"
+ assert diffmax < dtol, msg
+ else:
+ test.success = True
+ print(" " + msg)
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ compare="mf6_regression",
)
- test.set_model(sim.simulation_data.mfpath.get_sim_path(), testModel=False)
test.run()
- test.compare()
- evl(test, data)
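The build_models/check_output pair above encodes the mf6_regression comparison pattern: the same model is written twice, once to the test workspace and once to a mf6_regression subdirectory, the framework runs both, and check_output differences csub_obs.csv from the two runs. Condensed, and assuming only the keywords visible in this hunk:

    def build_models(idx, test):
        # identical simulations; the copy under mf6_regression is the baseline run
        sim = get_model(idx, test.workspace)
        cmp = get_model(idx, test.workspace / "mf6_regression")
        return sim, cmp

    # TestFramework(..., compare="mf6_regression") runs both simulations, and
    # check_output reads csub_obs.csv from test.workspace and from
    # test.workspace / "mf6_regression" to compute the comparison.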
diff --git a/autotest/test_gwf_csub_sk02.py b/autotest/test_gwf_csub_sk02.py
index 5ae15cf0b0f..62f546f77e9 100644
--- a/autotest/test_gwf_csub_sk02.py
+++ b/autotest/test_gwf_csub_sk02.py
@@ -3,20 +3,19 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from modflow_devtools.misc import is_in_ci
-from simulation import TestSimulation
-
-ex = ["csub_sk02a", "csub_sk02b", "csub_sk02c", "csub_sk02d"]
-constantcv = [True for idx in range(len(ex))]
-cmppths = ["mf6_regression" for idx in range(len(ex))]
-tops = [150.0 for idx in range(len(ex))]
-newtons = [True for idx in range(len(ex))]
+
+cases = ["csub_sk02a", "csub_sk02b", "csub_sk02c", "csub_sk02d"]
+constantcv = [True for _ in range(len(cases))]
+cmppths = ["mf6_regression" for _ in range(len(cases))]
+tops = [150.0 for _ in range(len(cases))]
+newtons = [True for _ in range(len(cases))]
ump = [None, None, True, True]
iump = [0, 0, 1, 1]
-eslag = [True for idx in range(len(ex))]
+eslag = [True for _ in range(len(cases))]
icrcc = [0, 1, 0, 1]
-htol = [None for idx in range(len(ex))]
+htol = [None for _ in range(len(cases))]
dtol = 1e-3
bud_lst = [
"CSUB-CGELASTIC_IN",
@@ -28,10 +27,10 @@
# static model data
nlay, nrow, ncol = 3, 10, 10
nper = 31
-perlen = [1.0] + [365.2500000 for i in range(nper - 1)]
-nstp = [1] + [6 for i in range(nper - 1)]
-tsmult = [1.0] + [1.3 for i in range(nper - 1)]
-steady = [True] + [False for i in range(nper - 1)]
+perlen = [1.0] + [365.2500000 for _ in range(nper - 1)]
+nstp = [1] + [6 for _ in range(nper - 1)]
+tsmult = [1.0] + [1.3 for _ in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
delr, delc = 1000.0, 2000.0
top = 150.0
botm = [-100, -150.0, -350.0]
@@ -61,8 +60,8 @@
hclose, rclose, relax = 1e-9, 1e-6, 1.0
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# all cells are active
ib = 1
@@ -178,7 +177,7 @@
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
newton = newtons[idx]
newtonoptions = None
imsla = "CG"
@@ -321,33 +320,28 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
-
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- sim = get_model(idx, ws)
+ sim = get_model(idx, test.workspace)
# build comparison files
cpth = cmppths[idx]
- ws = os.path.join(dir, cpth)
+ ws = os.path.join(test.workspace, cpth)
mc = get_model(idx, ws)
-
return sim, mc
-def eval_comp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# comparision total compaction results
- cpth = cmppths[sim.idxsim]
- fpth = os.path.join(sim.simpath, cpth, "csub_obs.csv")
+ cpth = cmppths[idx]
+ fpth = os.path.join(test.workspace, cpth, "csub_obs.csv")
try:
tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -360,27 +354,26 @@ def eval_comp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- line = f"{tc0['time'][i]:10.2g}"
- line += f"{tc['TCOMP3'][i]:10.2g}"
- line += f"{tc0['TCOMP3'][i]:10.2g}"
- line += f"{diff[i]:10.2g}"
- f.write(line + "\n")
- f.close()
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ line = f"{tc0['time'][i]:10.2g}"
+ line += f"{tc['TCOMP3'][i]:10.2g}"
+ line += f"{tc0['TCOMP3'][i]:10.2g}"
+ line += f"{diff[i]:10.2g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -392,11 +385,11 @@ def eval_comp(sim):
d = np.recarray(nbud, dtype=dtype)
for key in bud_lst:
d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -409,13 +402,13 @@ def eval_comp(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
for idx, key in enumerate(bud_lst):
@@ -425,49 +418,42 @@ def eval_comp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol[idx],
- idxsim=idx,
- mf6_regression=True,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ htol=htol[idx],
+ compare="mf6_regression",
)
+ test.run()
diff --git a/autotest/test_gwf_csub_sk03.py b/autotest/test_gwf_csub_sk03.py
index f886dba8a4b..a12fca651b0 100644
--- a/autotest/test_gwf_csub_sk03.py
+++ b/autotest/test_gwf_csub_sk03.py
@@ -4,15 +4,15 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_sk03a"]
-constantcv = [True for idx in range(len(ex))]
-cmppths = ["mf6_regression" for idx in range(len(ex))]
-newtons = [True for idx in range(len(ex))]
+cases = ["csub_sk03a"]
+constantcv = [True for _ in range(len(cases))]
+cmppths = ["mf6_regression" for _ in range(len(cases))]
+newtons = [True for _ in range(len(cases))]
icrcc = [0, 1, 0, 1]
-htol = [None for idx in range(len(ex))]
+htol = [None for _ in range(len(cases))]
dtol = 1e-3
bud_lst = [
"CSUB-CGELASTIC_IN",
@@ -31,14 +31,14 @@
totim = perlen.sum() - perlen[0]
nstp = [1, nsec * 2]
tsmult = [1.0, 1.00]
-steady = [True] + [False for i in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
# spatial discretization
ft2m = 1.0 / 3.28081
nlay, nrow, ncol = 3, 21, 20
delr = np.ones(ncol, dtype=float) * 0.5
-for idx in range(1, ncol):
- delr[idx] = min(delr[idx - 1] * 1.2, 15.0)
+for i in range(1, ncol):
+ delr[i] = min(delr[i - 1] * 1.2, 15.0)
delc = 50.0
top = 0.0
botm = np.array([-40, -70.0, -100.0], dtype=float) * ft2m
@@ -61,8 +61,8 @@
hclose, rclose, relax = 1e-9, 1e-6, 1.0
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# all cells are active
ib = 1
@@ -231,7 +231,7 @@
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
newton = newtons[idx]
newtonoptions = None
imsla = "CG"
@@ -500,32 +500,28 @@ def get_model(idx, ws):
# SUB package problem 3
-def build_model(idx, dir):
-
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- sim = get_model(idx, ws)
+ sim = get_model(idx, test.workspace)
# build comparison files
cpth = cmppths[idx]
- ws = os.path.join(dir, cpth)
+ ws = os.path.join(test.workspace, cpth)
mc = get_model(idx, ws)
return sim, mc
-def eval_comp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -537,11 +533,11 @@ def eval_comp(sim):
d = np.recarray(nbud, dtype=dtype)
for key in bud_lst:
d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -554,66 +550,59 @@ def eval_comp(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol[idx],
- idxsim=idx,
- mf6_regression=True,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ htol=htol[idx],
+ compare="mf6_regression",
)
+ test.run()
diff --git a/autotest/test_gwf_csub_sk04_nr.py b/autotest/test_gwf_csub_sk04_nr.py
index edb9f0409e8..8b0e5c748e6 100644
--- a/autotest/test_gwf_csub_sk04_nr.py
+++ b/autotest/test_gwf_csub_sk04_nr.py
@@ -3,10 +3,10 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = (
+cases = (
"csub_sk04a",
"csub_sk04b",
"csub_sk04c",
@@ -33,10 +33,10 @@
nlay, nrow, ncol = 2, 1, 2
nper = 3
tsp0 = 1.0
-perlen = [tsp0] + [365.2500000 for i in range(nper - 1)]
-nstp = [1] + [200 for i in range(nper - 1)]
-tsmult = [1.0] + [1.0 for i in range(nper - 1)]
-steady = [True] + [False for i in range(nper - 1)]
+perlen = [tsp0] + [365.2500000 for _ in range(nper - 1)]
+nstp = [1] + [200 for _ in range(nper - 1)]
+tsmult = [1.0] + [1.0 for _ in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
delr, delc = 1000.0, 1000.0
top = 0.0
botm = [-10.0, -20.0]
@@ -55,8 +55,8 @@
hclose, rclose, relax = 1e-9, 1e-3, 1.0
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# all cells are active
ib = 1
@@ -77,12 +77,12 @@
crnd0[:, 0, 0] = 0.0
-def build_model(idx, dir):
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
newton = newtons[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -244,18 +244,16 @@ def build_model(idx, dir):
return sim, None
-def eval_comp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -267,11 +265,11 @@ def eval_comp(sim):
d = np.recarray(nbud, dtype=dtype)
for key in bud_lst:
d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -284,64 +282,57 @@ def eval_comp(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol,
- idxsim=idx,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ htol=htol,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_sub01.py b/autotest/test_gwf_csub_sub01.py
index 4327c62e3fb..d0e40fcc99a 100644
--- a/autotest/test_gwf_csub_sub01.py
+++ b/autotest/test_gwf_csub_sub01.py
@@ -3,14 +3,14 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
+cases = ["csub_sub01a", "csub_sub01b"]
paktest = "csub"
budtol = 1e-2
-ex = ["csub_sub01a", "csub_sub01b"]
compression_indices = [None, True]
-ndcell = [19] * len(ex)
+ndcell = [19] * len(cases)
# static model data
# spatial discretization
@@ -23,10 +23,10 @@
# temporal discretization
nper = 1
-perlen = [1000.0 for i in range(nper)]
-nstp = [100 for i in range(nper)]
-tsmult = [1.05 for i in range(nper)]
-steady = [False for i in range(nper)]
+perlen = [1000.0 for _ in range(nper)]
+nstp = [100 for _ in range(nper)]
+tsmult = [1.05 for _ in range(nper)]
+steady = [False for _ in range(nper)]
strt = 0.0
strt6 = 1.0
@@ -41,8 +41,8 @@
hclose, rclose, relax = 1e-6, 1e-6, 0.97
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
ib = 1
@@ -78,7 +78,7 @@
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
@@ -215,31 +215,27 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
-
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- sim = get_model(idx, ws)
+ sim = get_model(idx, test.workspace)
# build MODFLOW-2005 files
- ws = os.path.join(dir, "mf6_regression")
+ ws = os.path.join(test.workspace, "mf6_regression")
mc = get_model(idx, ws)
return sim, mc
-def eval_sub(sim):
- print("evaluating subsidence...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# comparison total compaction results
- fpth = os.path.join(sim.simpath, "mf6_regression", "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "mf6_regression", "csub_obs.csv")
try:
tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -253,40 +249,37 @@ def eval_sub(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'CSUB':>15s}"
- line += f" {'MF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc0['time'][i]:15g}"
- line += f" {tc['TCOMP'][i]:15g}"
- line += f" {tc0['TCOMP'][i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'CSUB':>15s}"
+ line += f" {'MF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc0['time'][i]:15g}"
+ line += f" {tc['TCOMP'][i]:15g}"
+ line += f" {tc0['TCOMP'][i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
-
- return
+ cbc_compare(test)
# compare cbc and lst budgets
-def cbc_compare(sim):
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -303,7 +296,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -316,7 +309,7 @@ def cbc_compare(sim):
# get data from cbc dile
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -334,64 +327,57 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"diffmax {diffmax} exceeds tolerance {budtol}"
assert diffmax < budtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_sub,
- idxsim=idx,
- mf6_regression=True,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ compare="mf6_regression",
)
+ test.run()
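Note: several of the refactored `check_output`/`cbc_compare` functions above repeat the same reconciliation of listing-file budget terms against the cell-by-cell budget file. A condensed sketch of that logic is given below, built on the flopy readers these tests already use (`Mf6ListBudget`, `CellBudgetFile`); the helper name and signature are illustrative only and do not appear in this patch.

# Condensed sketch of the listing-file vs. cbc budget reconciliation.
import os

import flopy
import numpy as np


def max_budget_difference(workspace, name, bud_lst, cbc_bud):
    # budget terms from the listing file (same call the tests use)
    lst_path = os.path.join(workspace, f"{name}.lst")
    d0 = flopy.utils.Mf6ListBudget(lst_path).get_budget(names=list(bud_lst))[0]

    # accumulate the same terms from the cell-by-cell budget file
    cbc_path = os.path.join(workspace, f"{name}.cbc")
    cobj = flopy.utils.CellBudgetFile(cbc_path, precision="double")
    d = {key: np.zeros(d0.shape[0], dtype=float) for key in bud_lst}
    for i, kstpkper in enumerate(cobj.get_kstpkper()):
        for text in cbc_bud:
            qin, qout = 0.0, 0.0
            for v in cobj.get_data(kstpkper=kstpkper, text=text):
                # compact (list) records carry flows in the "q" field;
                # full arrays are summed directly
                vals = v["q"] if isinstance(v, np.recarray) else np.asarray(v).ravel()
                qin += vals[vals > 0.0].sum()
                qout -= vals[vals < 0.0].sum()
            d[f"{text}_IN"][i] = qin
            d[f"{text}_OUT"][i] = qout

    # largest absolute listing-vs-cbc discrepancy over all terms and times
    diff = np.array([d0[key] - d[key] for key in bud_lst])
    return np.abs(diff).max()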
diff --git a/autotest/test_gwf_csub_sub01_adjmat.py b/autotest/test_gwf_csub_sub01_adjmat.py
index 694799c26da..c37adcaf16d 100644
--- a/autotest/test_gwf_csub_sub01_adjmat.py
+++ b/autotest/test_gwf_csub_sub01_adjmat.py
@@ -3,16 +3,15 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
+cases = ["csub_sub01_adj"]
paktest = "csub"
budtol = 1e-2
-
compdir = "mf6"
-ex = ["csub_sub01_adj"]
compression_indices = [None]
-ndcell = [19] * len(ex)
+ndcell = [19] * len(cases)
# static model data
# spatial discretization
@@ -25,10 +24,10 @@
# temporal discretization
nper = 1
-perlen = [1000.0 for i in range(nper)]
-nstp = [100 for i in range(nper)]
-tsmult = [1.05 for i in range(nper)]
-steady = [False for i in range(nper)]
+perlen = [1000.0 for _ in range(nper)]
+nstp = [100 for _ in range(nper)]
+tsmult = [1.05 for _ in range(nper)]
+steady = [False for _ in range(nper)]
strt = 0.0
strt6 = 1.0
@@ -43,8 +42,8 @@
hclose, rclose, relax = 1e-12, 1e-6, 0.97
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
ib = 1
@@ -85,18 +84,18 @@
]
-def build_model(idx, dir):
- sim = get_model(idx, dir, adjustmat=True)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace, adjustmat=True)
# build MODFLOW-6 with constant material properties
- pth = os.path.join(dir, compdir)
+ pth = os.path.join(test.workspace, compdir)
mc = get_model(idx, pth, None)
return sim, mc
def get_model(idx, dir, adjustmat=False):
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
ws = dir
@@ -221,18 +220,16 @@ def calc_theta_thick(comp, thickini=1.0):
return poro, b
-def eval_sub(sim):
- print("evaluating subsidence...")
-
+def check_output(idx, test):
# MODFLOW 6 compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW 6 base compaction results
- fpth = os.path.join(sim.simpath, compdir, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, compdir, "csub_obs.csv")
try:
tcb = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -246,28 +243,27 @@ def eval_sub(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'CSUB':>15s}"
- line += f" {'MF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc['time'][i]:15g}"
- line += f" {tc['TCOMP'][i]:15g}"
- line += f" {tcb['TCOMP'][i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'CSUB':>15s}"
+ line += f" {'MF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc['time'][i]:15g}"
+ line += f" {tc['TCOMP'][i]:15g}"
+ line += f" {tcb['TCOMP'][i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol:15.7g}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# calculate theta and porosity from total interbed compaction
@@ -287,11 +283,11 @@ def eval_sub(sim):
+ f"difference ({diffmax:15.7g}) "
)
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol:15.7g}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# calculate theta and porosity from interbed cell compaction
@@ -317,11 +313,11 @@ def eval_sub(sim):
+ f"({diffmax:15.7g}) "
)
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol:15.7g}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
calci["THICK"] += calc["THICK"]
calci["THETA"] += calc["THICK"] * calc["THETA"]
@@ -337,21 +333,21 @@ def eval_sub(sim):
)
msg += "calculated from individual interbed cell values "
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol:15.7g}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
+ cbc_compare(test)
# compare cbc and lst budgets
-def cbc_compare(sim):
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -368,7 +364,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -381,7 +377,7 @@ def cbc_compare(sim):
# get data from cbc dile
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -399,57 +395,58 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
dtol = 1e-6
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize("name", ex)
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(name=name, exe_dict=targets, exfunc=eval_sub, idxsim=0),
- str(function_tmpdir),
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_sub01_elastic.py b/autotest/test_gwf_csub_sub01_elastic.py
index 815910827b4..05a59a2045a 100644
--- a/autotest/test_gwf_csub_sub01_elastic.py
+++ b/autotest/test_gwf_csub_sub01_elastic.py
@@ -3,14 +3,14 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
+cases = ["csub_sub01_elasa"]
cmppth = "mf6"
paktest = "csub"
dtol = 1e-3
budtol = 1e-2
-ex = ["csub_sub01_elasa"]
ndcell = [19]
# static model data
@@ -24,13 +24,13 @@
# temporal discretization
nper = 1
-perlen = [1000.0 for i in range(nper)]
-nstp = [100 for i in range(nper)]
-tsmult = [1.05 for i in range(nper)]
-steady = [False for i in range(nper)]
+perlen = [1000.0 for _ in range(nper)]
+nstp = [100 for _ in range(nper)]
+tsmult = [1.05 for _ in range(nper)]
+steady = [False for _ in range(nper)]
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
strt = 0.0
strt6 = 1.0
@@ -90,7 +90,7 @@
def build_mf6(idx, ws, newton=None):
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
sim = flopy.mf6.MFSimulation(
@@ -176,28 +176,23 @@ def build_mf6(idx, ws, newton=None):
return sim
-def build_model(idx, dir):
- ws = dir
- sim = build_mf6(idx, ws)
-
- ws = os.path.join(ws, cmppth)
+def build_models(idx, test):
+ sim = build_mf6(idx, test.workspace)
+ ws = os.path.join(test.workspace, cmppth)
mc = build_mf6(idx, ws, newton="NEWTON")
-
return sim, mc
-def eval_sub(sim):
- print("evaluating subsidence...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW 6 with newton-raphson
- fpth = os.path.join(sim.simpath, cmppth, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, cmppth, "csub_obs.csv")
try:
tci = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -216,42 +211,39 @@ def eval_sub(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- for tag in tc.dtype.names[1:]:
- line += f" {f'{tag}_SK':>15s}"
- line += f" {f'{tag}_SKIB':>15s}"
- line += f" {f'{tag}_DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc['time'][i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
for tag in tc.dtype.names[1:]:
- line += f" {tc[tag][i]:15g}"
- line += f" {tci[tag][i]:15g}"
- line += f" {tc[tag][i] - tci[tag][i]:15g}"
+ line += f" {f'{tag}_SK':>15s}"
+ line += f" {f'{tag}_SKIB':>15s}"
+ line += f" {f'{tag}_DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc['time'][i]:15g}"
+ for tag in tc.dtype.names[1:]:
+ line += f" {tc[tag][i]:15g}"
+ line += f" {tci[tag][i]:15g}"
+ line += f" {tc[tag][i] - tci[tag][i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
-
- return
+ cbc_compare(test)
# compare cbc and lst budgets
-def cbc_compare(sim):
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -268,7 +260,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -281,7 +273,7 @@ def cbc_compare(sim):
# get data from cbc dile
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -299,60 +291,56 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_sub, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_sub01_pch.py b/autotest/test_gwf_csub_sub01_pch.py
index b4b12c81e5b..970e665a3c7 100644
--- a/autotest/test_gwf_csub_sub01_pch.py
+++ b/autotest/test_gwf_csub_sub01_pch.py
@@ -3,14 +3,14 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
+cases = ["csub_sub01_pch"]
paktest = "csub"
budtol = 1e-2
compdir = "mf6"
-ex = ["csub_sub01_pch"]
-ndcell = [19] * len(ex)
+ndcell = [19] * len(cases)
# static model data
# spatial discretization
@@ -23,10 +23,10 @@
# temporal discretization
nper = 1
-perlen = [1000.0 for i in range(nper)]
-nstp = [100 for i in range(nper)]
-tsmult = [1.05 for i in range(nper)]
-steady = [False for i in range(nper)]
+perlen = [1000.0 for _ in range(nper)]
+nstp = [100 for _ in range(nper)]
+tsmult = [1.05 for _ in range(nper)]
+steady = [False for _ in range(nper)]
strt = 0.0
strt6 = 1.0
@@ -41,8 +41,8 @@
hclose, rclose, relax = 1e-12, 1e-6, 0.97
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
ib = 1
@@ -69,18 +69,18 @@
thick = [1.0]
-def build_model(idx, dir):
- sim = get_model(idx, dir, pch=True)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace, pch=True)
# build MODFLOW-6 with constant material properties
- pth = os.path.join(dir, compdir)
+ pth = os.path.join(test.workspace, compdir)
mc = get_model(idx, pth)
return sim, mc
def get_model(idx, dir, pch=None):
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
ws = dir
@@ -221,18 +221,16 @@ def get_model(idx, dir, pch=None):
return sim
-def eval_sub(sim):
- print("evaluating subsidence...")
-
+def check_output(idx, test):
# MODFLOW 6 compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW 6 base compaction results
- fpth = os.path.join(sim.simpath, compdir, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, compdir, "csub_obs.csv")
try:
tcb = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -246,38 +244,37 @@ def eval_sub(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'CSUB':>15s}"
- line += f" {'MF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc['time'][i]:15g}"
- line += f" {tc['TCOMP'][i]:15g}"
- line += f" {tcb['TCOMP'][i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'CSUB':>15s}"
+ line += f" {'MF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc['time'][i]:15g}"
+ line += f" {tc['TCOMP'][i]:15g}"
+ line += f" {tcb['TCOMP'][i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol:15.7g}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
+ cbc_compare(test)
# compare cbc and lst budgets
-def cbc_compare(sim):
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -294,7 +291,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -307,7 +304,7 @@ def cbc_compare(sim):
# get data from cbc dile
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -325,62 +322,58 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
dtol = 1e-6
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_sub, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
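Note: a recurring change in these hunks is replacing bare `open()`/`f.close()` pairs with a `with` block so the summary files are closed even when a later assertion fails. A minimal sketch of the rewritten `.bud.cmp.out` writer follows; the helper name and argument list are illustrative, and the header row is emitted once up front rather than inside the row loop.

# Sketch of the with-block budget summary writer used throughout this patch.
import os


def write_budget_summary(workspace, name, totim, d0, d, diff, bud_lst):
    fpth = os.path.join(workspace, f"{os.path.basename(name)}.bud.cmp.out")
    # the context manager closes the file even if the calling test
    # subsequently fails an assertion
    with open(fpth, "w") as f:
        # header row
        line = f"{'TIME':>10s}"
        for key in bud_lst:
            line += f"{key + '_LST':>25s}"
            line += f"{key + '_CBC':>25s}"
            line += f"{key + '_DIF':>25s}"
        f.write(line + "\n")
        # one row per saved time
        for i in range(diff.shape[0]):
            line = f"{totim[i]:10g}"
            for j, key in enumerate(bud_lst):
                line += f"{d0[key][i]:25g}"
                line += f"{d[key][i]:25g}"
                line += f"{diff[i, j]:25g}"
            f.write(line + "\n")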
diff --git a/autotest/test_gwf_csub_sub02.py b/autotest/test_gwf_csub_sub02.py
index 7596dfc1875..07985c835d5 100644
--- a/autotest/test_gwf_csub_sub02.py
+++ b/autotest/test_gwf_csub_sub02.py
@@ -2,10 +2,10 @@
import flopy
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = [
+cases = [
"csub_sub02a",
"csub_sub02b",
"csub_sub02c",
@@ -23,10 +23,10 @@
# static model data
nlay, nrow, ncol = 1, 1, 1
nper = 10
-perlen = [182.625 for i in range(nper)]
-nstp = [10 for i in range(nper)]
-tsmult = [1.05 for i in range(nper)]
-steady = [False for i in range(nper)]
+perlen = [182.625 for _ in range(nper)]
+nstp = [10 for _ in range(nper)]
+tsmult = [1.05 for _ in range(nper)]
+steady = [False for _ in range(nper)]
delr, delc = 1000.0, 1000.0
top = -100.0
botm = [-600.0]
@@ -41,8 +41,8 @@
hclose, rclose, relax = 1e-6, 1e-6, 0.97
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
ib = 1
@@ -76,7 +76,7 @@
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
ss = 1.14e-3
sc6 = True
if not storagecoeff[idx]:
@@ -200,23 +200,20 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
- ws = dir
- sim = get_model(idx, ws)
-
- ws = os.path.join(dir, cmppth)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
+ ws = os.path.join(test.workspace, cmppth)
mc = get_model(idx, ws)
return sim, mc
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(name=name, exe_dict=targets, mf6_regression=True),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ targets=targets,
+ compare="mf6_regression",
)
+ test.run()
diff --git a/autotest/test_gwf_csub_sub03.py b/autotest/test_gwf_csub_sub03.py
index dbb848c2b3c..cdd41d0b6ea 100644
--- a/autotest/test_gwf_csub_sub03.py
+++ b/autotest/test_gwf_csub_sub03.py
@@ -3,11 +3,11 @@
import flopy
import numpy as np
import pytest
+
from conftest import project_root_path
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_sub03a", "csub_sub03b"]
+cases = ["csub_sub03a", "csub_sub03b"]
cmppth = "mf6_regression"
cvopt = [None, None, None]
constantcv = [True, True]
@@ -25,10 +25,10 @@
# static model data
nlay, nrow, ncol = 3, 10, 10
nper = 31
-perlen = [1.0] + [365.2500000 for i in range(nper - 1)]
-nstp = [1] + [6 for i in range(nper - 1)]
-tsmult = [1.0] + [1.3 for i in range(nper - 1)]
-steady = [True] + [False for i in range(nper - 1)]
+perlen = [1.0] + [365.2500000 for _ in range(nper - 1)]
+nstp = [1] + [6 for _ in range(nper - 1)]
+tsmult = [1.0] + [1.3 for _ in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
delr, delc = 1000.0, 2000.0
top = 0.0
botm = [-100, -150.0, -350.0]
@@ -59,8 +59,8 @@
hclose, rclose, relax = 1e-9, 1e-6, 1.0
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# all cells are active
ib = 1
@@ -140,7 +140,7 @@
# SUB package problem 3
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
# ibc packagedata container counter
sub6 = []
ibcno = 0
@@ -344,28 +344,25 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
- ws = dir
- sim = get_model(idx, ws)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
- ws = os.path.join(dir, cmppth)
+ ws = os.path.join(test.workspace, cmppth)
mc = get_model(idx, ws)
return sim, mc
-def eval_comp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# Comparision total compaction results
- fpth = os.path.join(sim.simpath, cmppth, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, cmppth, "csub_obs.csv")
try:
tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -377,15 +374,15 @@ def eval_comp(sim):
msg = f"maximum absolute total-compaction difference ({diffmax}) "
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -397,11 +394,11 @@ def eval_comp(sim):
d = np.recarray(nbud, dtype=dtype)
for key in bud_lst:
d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -411,44 +408,39 @@ def eval_comp(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol[idx],
- mf6_regression=True,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ htol=htol[idx],
+ compare="mf6_regression",
)
+ test.run()
diff --git a/autotest/test_gwf_csub_subwt01.py b/autotest/test_gwf_csub_subwt01.py
index aad1e15e503..a1ff03b8aa7 100644
--- a/autotest/test_gwf_csub_subwt01.py
+++ b/autotest/test_gwf_csub_subwt01.py
@@ -3,12 +3,12 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_subwt01a", "csub_subwt01b", "csub_subwt01c", "csub_subwt01d"]
+cases = ["csub_subwt01a", "csub_subwt01b", "csub_subwt01c", "csub_subwt01d"]
cmppth = "mf6_regression"
-htol = [None for n in ex]
+htol = [None for _ in cases]
dtol = 1e-3
budtol = 1e-2
paktest = "csub"
@@ -62,8 +62,8 @@
fluxtol = rclose
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# this used to work
# ib = np.zeros((nlay, nrow, ncol), dtype=int)
@@ -119,7 +119,7 @@
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name,
memory_print_option="all",
@@ -238,24 +238,20 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
-
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- sim = get_model(idx, ws)
+ sim = get_model(idx, test.workspace)
# build comparision files
- ws = os.path.join(dir, cmppth)
+ ws = os.path.join(test.workspace, cmppth)
mc = get_model(idx, ws)
return sim, mc
-def eval_comp(sim):
-
- print("evaluating compaction...")
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -263,7 +259,7 @@ def eval_comp(sim):
# Comparision total compaction results
cpth = cmppth
- fpth = os.path.join(sim.simpath, cpth, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, cpth, "csub_obs.csv")
try:
tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -277,41 +273,37 @@ def eval_comp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'CSUB':>15s}"
- line += f" {'MF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc0['time'][i]:15g}"
- line += f" {tc[loctag][i]:15g}"
- line += f" {tc0[loctag][i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'CSUB':>15s}"
+ line += f" {'MF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc0['time'][i]:15g}"
+ line += f" {tc[loctag][i]:15g}"
+ line += f" {tc0[loctag][i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
-
- return
+ cbc_compare(test)
# compare cbc and lst budgets
-def cbc_compare(sim):
- print("evaluating cbc and budget...")
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -328,7 +320,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -341,7 +333,7 @@ def cbc_compare(sim):
# get data from cbc dile
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -359,65 +351,59 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {budtol}"
assert diffmax < budtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol[idx],
- mf6_regression=True,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ htol=htol[idx],
+ compare="mf6_regression",
)
+ test.run()
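
Note: the same driver refactor is applied to every autotest file below. A minimal, self-contained sketch of the new-style driver, assuming only the framework.TestFramework constructor keywords and callback shapes visible in this patch (the case name and callback bodies are hypothetical placeholders):

import pytest

from framework import TestFramework  # provided by the autotest suite

cases = ["example_case"]  # hypothetical scenario names


def build_models(idx, test):
    # build and return (sim, comparison_sim_or_None) in test.workspace
    return None, None


def check_output(idx, test):
    # read results from test.workspace and assert against expectations
    pass


@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
    test = TestFramework(
        name=name,
        workspace=function_tmpdir,
        targets=targets,
        build=lambda t: build_models(idx, t),
        check=lambda t: check_output(idx, t),
        compare="mf6_regression",  # or None to skip the comparison model
    )
    test.run()
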
diff --git a/autotest/test_gwf_csub_subwt02.py b/autotest/test_gwf_csub_subwt02.py
index bf9068e4172..877e5474d4c 100644
--- a/autotest/test_gwf_csub_subwt02.py
+++ b/autotest/test_gwf_csub_subwt02.py
@@ -3,11 +3,11 @@
import flopy
import numpy as np
import pytest
+
from conftest import project_root_path
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_subwt02a", "csub_subwt02b", "csub_subwt02c", "csub_subwt02d"]
+cases = ["csub_subwt02a", "csub_subwt02b", "csub_subwt02c", "csub_subwt02d"]
timeseries = [True, False, True, False]
cmppth = "mf6_regression"
htol = [None, None, None, None]
@@ -183,7 +183,7 @@
# beta = 4.65120000e-10
gammaw = 9806.65000000
sw = beta * gammaw * theta
-ss = [sw for k in range(nlay)]
+ss = [sw for _ in range(nlay)]
swt6 = []
ibcno = 0
@@ -216,7 +216,7 @@
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
@@ -407,23 +407,21 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
-
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = get_model(idx, ws)
# build comparison files
- ws = os.path.join(dir, cmppth)
+ ws = os.path.join(test.workspace, cmppth)
mc = get_model(idx, ws)
return sim, mc
-def eval_comp(sim):
- print("evaluating compaction...")
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -431,7 +429,7 @@ def eval_comp(sim):
# comparison total compaction results
cpth = cmppth
- fpth = os.path.join(sim.simpath, cmppth, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, cmppth, "csub_obs.csv")
try:
tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -445,7 +443,7 @@ def eval_comp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
f = open(fpth, "w")
line = f"{'TOTIM':>15s}"
@@ -462,24 +460,23 @@ def eval_comp(sim):
f.close()
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
+ cbc_compare(test)
return
# compare cbc and lst budgets
-def cbc_compare(sim):
- print("evaluating cbc and budget...")
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -496,7 +493,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -509,7 +506,7 @@ def cbc_compare(sim):
# get data from cbc file
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -527,65 +524,59 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {budtol}"
assert diffmax < budtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol[idx],
- mf6_regression=True,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
+ htol=htol[idx],
+ compare="mf6_regression",
)
+ test.run()
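
All of the check_output routines in these csub tests read MODFLOW 6 observation CSV output the same way; a small sketch of that read, with a hypothetical file name mirroring the csub_obs.csv usage above:

import numpy as np

# names=True turns the CSV header row into field names of a structured array,
# so each observation series can be pulled out by column name.
tc = np.genfromtxt("csub_obs.csv", names=True, delimiter=",")
times = tc["time"]  # simulation time column
first_obs = tc[tc.dtype.names[1]]  # first observation column after time
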
diff --git a/autotest/test_gwf_csub_subwt03.py b/autotest/test_gwf_csub_subwt03.py
index abe743ca558..14c56e899d3 100644
--- a/autotest/test_gwf_csub_subwt03.py
+++ b/autotest/test_gwf_csub_subwt03.py
@@ -3,12 +3,12 @@
import flopy
import numpy as np
import pytest
+
from conftest import project_root_path
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_subwt03a", "csub_subwt03b", "csub_subwt03c", "csub_subwt03d"]
-nex = len(ex)
+cases = ["csub_subwt03a", "csub_subwt03b", "csub_subwt03c", "csub_subwt03d"]
+nex = len(cases)
cmppth = "mf6"
htol = None # 0.1
dtol = 1e-3
@@ -29,8 +29,8 @@
tsmult = [1.0, 1.0, 1.0]
steady = [True, False, False]
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# spatial discretization
nlay, nrow, ncol = 4, ib0.shape[0], ib0.shape[1]
@@ -57,18 +57,18 @@
wnlays = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3]
wnrows = [0, 1, 1, 1, 2, 3, 4, 4, 5, 6, 13, 13, 15, 15, 16, 17, 17, 18, 8, 11]
wncols = [7, 4, 7, 11, 3, 11, 2, 12, 13, 1, 1, 13, 2, 12, 12, 3, 11, 6, 9, 6]
-wrates0 = [2.2e3 for n in range(18)] + [0.0, 0.0]
-wrates1 = [2.2e3 for n in range(18)] + [-7.2e04, -7.2e04]
+wrates0 = [2.2e3 for _ in range(18)] + [0.0, 0.0]
+wrates1 = [2.2e3 for _ in range(18)] + [-7.2e04, -7.2e04]
w0 = []
w1 = []
ws0 = []
ws1 = []
-for idx, (k, i, j) in enumerate(zip(wnlays, wnrows, wncols)):
- w0.append((k, i, j, wrates0[idx]))
- w1.append((k, i, j, wrates1[idx]))
- ws0.append(((k, i, j), wrates0[idx]))
- ws1.append(((k, i, j), wrates1[idx]))
+for n, (k, i, j) in enumerate(zip(wnlays, wnrows, wncols)):
+ w0.append((k, i, j, wrates0[n]))
+ w1.append((k, i, j, wrates1[n]))
+ ws0.append(((k, i, j), wrates0[n]))
+ ws1.append(((k, i, j), wrates1[n]))
wd = {0: w0, 1: w1, 2: w0}
wd6 = {0: ws0, 1: ws1, 2: ws0}
@@ -202,11 +202,11 @@ def get_interbed(headbased=False, delay=False):
return swt6
-def build_model(idx, dir):
- sim = build_mf6(idx, dir)
+def build_models(idx, test):
+ sim = build_mf6(idx, test.workspace)
# build mf6 with interbeds
- wsc = os.path.join(dir, "mf6")
+ wsc = os.path.join(test.workspace, "mf6")
mc = build_mf6(idx, wsc, interbed=True)
return sim, mc
@@ -214,8 +214,7 @@ def build_model(idx, dir):
# build MODFLOW 6 files
def build_mf6(idx, ws, interbed=False):
-
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -353,18 +352,16 @@ def build_mf6(idx, ws, interbed=False):
return sim
-def eval_comp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 without interbeds
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW 6 with interbeds
- fpth = os.path.join(sim.simpath, cmppth, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, cmppth, "csub_obs.csv")
try:
tci = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -383,41 +380,39 @@ def eval_comp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- for tag in tc.dtype.names[1:]:
- line += f" {f'{tag}_SK':>15s}"
- line += f" {f'{tag}_SKIB':>15s}"
- line += f" {f'{tag}_DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc['time'][i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
for tag in tc.dtype.names[1:]:
- line += f" {tc[tag][i]:15g}"
- line += f" {tci[tag][i]:15g}"
- line += f" {tc[tag][i] - tci[tag][i]:15g}"
+ line += f" {f'{tag}_SK':>15s}"
+ line += f" {f'{tag}_SKIB':>15s}"
+ line += f" {f'{tag}_DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc['time'][i]:15g}"
+ for tag in tc.dtype.names[1:]:
+ line += f" {tc[tag][i]:15g}"
+ line += f" {tci[tag][i]:15g}"
+ line += f" {tc[tag][i] - tci[tag][i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
+ cbc_compare(test)
# compare cbc and lst budgets
-def cbc_compare(sim):
- print("evaluating cbc and budget...")
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -434,7 +429,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -447,7 +442,7 @@ def cbc_compare(sim):
# get data from cbc file
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -465,66 +460,58 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {budtol}"
assert diffmax < budtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- cmp_verbose=False,
- htol=htol,
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ htol=htol,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_wc01.py b/autotest/test_gwf_csub_wc01.py
index 1ec2bd2d69f..516bd661fb1 100644
--- a/autotest/test_gwf_csub_wc01.py
+++ b/autotest/test_gwf_csub_wc01.py
@@ -3,11 +3,11 @@
import flopy
import numpy as np
import pytest
+
from conftest import project_root_path
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_wc01a", "csub_wc02b"]
+cases = ["csub_wc01a", "csub_wc02b"]
cmppth = "mf6"
dtol = 1e-3
budtol = 1e-2
@@ -25,8 +25,8 @@
tsmult = [1.0, 1.0, 1.0]
steady = [True, False, False]
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# spatial discretization
nlay, nrow, ncol = 4, ib0.shape[0], ib0.shape[1]
@@ -61,20 +61,20 @@
wnlays = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3]
wnrows = [0, 1, 1, 1, 2, 3, 4, 4, 5, 6, 13, 13, 15, 15, 16, 17, 17, 18, 8, 11]
wncols = [7, 4, 7, 11, 3, 11, 2, 12, 13, 1, 1, 13, 2, 12, 12, 3, 11, 6, 9, 6]
-wrates0 = [2.2e3 for n in range(18)] + [0.0, 0.0]
-wrates1 = [2.2e3 for n in range(18)] + [-7.2e03, -7.2e03]
+wrates0 = [2.2e3 for _ in range(18)] + [0.0, 0.0]
+wrates1 = [2.2e3 for _ in range(18)] + [-7.2e03, -7.2e03]
w0 = []
w1 = []
ws0 = []
ws1 = []
-for idx, (k, i, j) in enumerate(zip(wnlays, wnrows, wncols)):
+for n, (k, i, j) in enumerate(zip(wnlays, wnrows, wncols)):
if ib0[i, j] < 1:
continue
- w0.append((k, i, j, wrates0[idx]))
- w1.append((k, i, j, wrates1[idx]))
- ws0.append(((k, i, j), wrates0[idx]))
- ws1.append(((k, i, j), wrates1[idx]))
+ w0.append((k, i, j, wrates0[n]))
+ w1.append((k, i, j, wrates1[n]))
+ ws0.append(((k, i, j), wrates0[n]))
+ ws1.append(((k, i, j), wrates1[n]))
wd = {0: w0, 1: w1, 2: w0}
wd6 = {0: ws0, 1: ws1, 2: ws0}
print(wd6)
@@ -211,11 +211,11 @@
]
-def build_model(idx, dir):
- sim = build_mf6(idx, dir)
+def build_models(idx, test):
+ sim = build_mf6(idx, test.workspace)
# build mf6 with interbeds
- wsc = os.path.join(dir, "mf6")
+ wsc = os.path.join(test.workspace, "mf6")
mc = build_mf6(idx, wsc, interbed=True)
return sim, mc
@@ -223,8 +223,7 @@ def build_model(idx, dir):
# build MODFLOW 6 files
def build_mf6(idx, ws, interbed=False):
-
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -344,18 +343,16 @@ def build_mf6(idx, ws, interbed=False):
return sim
-def eval_wcomp(sim):
- print("evaluating compaction...")
-
+def check_output(idx, test):
# MODFLOW 6 without interbeds water compressibility
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW 6 with interbeds water compressibility
- fpth = os.path.join(sim.simpath, cmppth, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, cmppth, "csub_obs.csv")
try:
tci = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -377,41 +374,39 @@ def eval_wcomp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.wcomp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.wcomp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- for tag in tc.dtype.names[1:]:
- line += f" {f'{tag}_SK':>15s}"
- line += f" {f'{tag}_SKIB':>15s}"
- line += f" {f'{tag}_DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc['time'][i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
for tag in tc.dtype.names[1:]:
- line += f" {tc[tag][i]:15g}"
- line += f" {tci[tag][i]:15g}"
- line += f" {tc[tag][i] - tci[tag][i]:15g}"
+ line += f" {f'{tag}_SK':>15s}"
+ line += f" {f'{tag}_SKIB':>15s}"
+ line += f" {f'{tag}_DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc['time'][i]:15g}"
+ for tag in tc.dtype.names[1:]:
+ line += f" {tc[tag][i]:15g}"
+ line += f" {tci[tag][i]:15g}"
+ line += f" {tc[tag][i] - tci[tag][i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
+ cbc_compare(test)
# compare cbc and lst budgets
-def cbc_compare(sim):
- print("evaluating cbc and budget...")
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -428,7 +423,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -441,7 +436,7 @@ def cbc_compare(sim):
# get data from cbc file
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -459,59 +454,57 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {budtol}"
assert diffmax < budtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(name=name, exe_dict=targets, exfunc=eval_wcomp),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_csub_wtgeo.py b/autotest/test_gwf_csub_wtgeo.py
index 6f691516290..a0f50764e8d 100644
--- a/autotest/test_gwf_csub_wtgeo.py
+++ b/autotest/test_gwf_csub_wtgeo.py
@@ -3,10 +3,10 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = [
+cases = [
"csub_wtgeoa",
"csub_wtgeob",
"csub_wtgeoc",
@@ -15,19 +15,19 @@
"csub_wtgeof",
"csub_wtgeog",
]
-constantcv = [True for idx in range(len(ex))]
+constantcv = [True for _ in range(len(cases))]
cmppth = "mf6_regression"
-compare = [True for idx in range(len(ex))]
+compare = [True for _ in range(len(cases))]
tops = [0.0, 0.0, 150.0, 0.0, 0.0, 150.0, 150.0]
ump = [None, None, True, None, True, None, True]
iump = [0, 0, 1, 0, 1, 0, 1]
-eslag = [True for idx in range(len(ex) - 2)] + 2 * [False]
+eslag = [True for _ in range(len(cases) - 2)] + 2 * [False]
# eslag = [True, True, True, False, True, False, False]
headformulation = [True, False, False, True, True, False, False]
ndc = [None, None, None, 19, 19, 19, 19]
delay = [False, False, False, True, True, True, True]
# newton = ["", "", "", "", "", None, ""]
-newton = ["NEWTON" for idx in range(len(ex))]
+newton = ["NEWTON" for _ in range(len(cases))]
htol = [None, None, None, 0.2, None, None, None]
dtol = 1e-3
@@ -37,13 +37,13 @@
# static model data
# temporal discretization
nper = 31
-perlen = [1.0] + [365.2500000 for i in range(nper - 1)]
-nstp = [1] + [6 for i in range(nper - 1)]
-tsmult = [1.0] + [1.3 for i in range(nper - 1)]
-steady = [True] + [False for i in range(nper - 1)]
+perlen = [1.0] + [365.2500000 for _ in range(nper - 1)]
+nstp = [1] + [6 for _ in range(nper - 1)]
+tsmult = [1.0] + [1.3 for _ in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# spatial discretization data
nlay, nrow, ncol = 3, 10, 10
@@ -161,7 +161,7 @@ def calc_stress(sgm0, sgs0, h, bt):
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
@@ -472,25 +472,25 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
- sim = get_model(idx, dir) # modflow6 files
- mc = get_model(idx, os.path.join(dir, cmppth)) # build comparison files
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace) # modflow6 files
+ mc = get_model(
+ idx, os.path.join(test.workspace, cmppth)
+ ) # build comparison files
return sim, mc
-def eval_comp(sim):
-
- if compare[sim.idxsim]:
- print("evaluating compaction...")
+def check_output(idx, test):
+ if compare[idx]:
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# comparison total compaction results
- fpth = os.path.join(sim.simpath, cmppth, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, cmppth, "csub_obs.csv")
try:
tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -503,40 +503,36 @@ def eval_comp(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.comp.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.comp.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'CSUB':>15s}"
- line += f" {'MF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc0['time'][i]:15g}"
- line += f" {tc['TCOMP3'][i]:15g}"
- line += f" {tc0['TCOMP3'][i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'CSUB':>15s}"
+ line += f" {'MF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc0['time'][i]:15g}"
+ line += f" {tc['TCOMP3'][i]:15g}"
+ line += f" {tc0['TCOMP3'][i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare budgets
- cbc_compare(sim)
+ cbc_compare(test)
- return
-
-def cbc_compare(sim):
- print("evaluating cbc and budget...")
+def cbc_compare(test):
# open cbc file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
# build list of cbc data to retrieve
@@ -553,7 +549,7 @@ def cbc_compare(sim):
bud_lst.append(f"{t}_OUT")
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -566,7 +562,7 @@ def cbc_compare(sim):
# get data from cbc file
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -584,64 +580,60 @@ def cbc_compare(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {budtol}"
assert diffmax < budtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
@pytest.mark.slow
-@pytest.mark.parametrize("idx, name", list(enumerate(ex)))
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_comp,
- htol=htol[idx],
- idxsim=idx,
- mf6_regression=True,
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ htol=htol[idx],
+ compare="mf6_regression",
+ verbose=False,
)
+ test.run()
diff --git a/autotest/test_gwf_csub_zdisp01.py b/autotest/test_gwf_csub_zdisp01.py
index f6ee90544e8..5b2f6091b3e 100644
--- a/autotest/test_gwf_csub_zdisp01.py
+++ b/autotest/test_gwf_csub_zdisp01.py
@@ -4,12 +4,12 @@
import numpy as np
import pytest
from flopy.utils.compare import compare_heads
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["csub_zdisp01"]
+cases = ["csub_zdisp01"]
cmppth = "mfnwt"
-htol = [None for idx in range(len(ex))]
+htol = [None for _ in range(len(cases))]
dtol = 1e-3
budtol = 1e-2
bud_lst = [
@@ -30,14 +30,14 @@
# static model data
# temporal discretization
nper = 31
-perlen = [1.0] + [365.2500000 for i in range(nper - 1)]
-nstp = [1] + [6 for i in range(nper - 1)]
-tsmult = [1.0] + [1.3 for i in range(nper - 1)]
-# tsmult = [1.0] + [1.0 for i in range(nper - 1)]
-steady = [True] + [False for i in range(nper - 1)]
+perlen = [1.0] + [365.2500000 for _ in range(nper - 1)]
+nstp = [1] + [6 for _ in range(nper - 1)]
+tsmult = [1.0] + [1.3 for _ in range(nper - 1)]
+# tsmult = [1.0] + [1.0 for _ in range(nper - 1)]
+steady = [True] + [False for _ in range(nper - 1)]
tdis_rc = []
-for idx in range(nper):
- tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
+for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
# spatial discretization data
nlay, nrow, ncol = 3, 20, 20
@@ -192,11 +192,11 @@
# variant SUB package problem 3
-def get_model(idx, dir):
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -328,8 +328,10 @@ def get_model(idx, dir):
# build MODFLOW-NWT files
cpth = cmppth
- ws = os.path.join(dir, cpth)
- mc = flopy.modflow.Modflow(name, model_ws=ws, version=cpth)
+ ws = os.path.join(test.workspace, cpth)
+ mc = flopy.modflow.Modflow(
+ name, model_ws=ws, version=cpth, exe_name=test.targets["mfnwt"]
+ )
dis = flopy.modflow.ModflowDis(
mc,
nlay=nlay,
@@ -393,27 +395,24 @@ def get_model(idx, dir):
idroptol=0,
)
- sim.write_simulation()
- mc.write_input()
-
return sim, mc
-def eval_zdisplacement(sim):
- print("evaluating z-displacement...")
-
+def check_output(idx, test):
# MODFLOW 6 total compaction results
- fpth = os.path.join(sim.simpath, "csub_obs.csv")
+ fpth = os.path.join(test.workspace, "csub_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW-2005 total compaction results
- fn = f"{os.path.basename(sim.name)}.total_comp.hds"
- fpth = os.path.join(sim.simpath, "mfnwt", fn)
+ fn = f"{os.path.basename(test.name)}.total_comp.hds"
+ fpth = os.path.join(test.workspace, "mfnwt", fn)
try:
- sobj = flopy.utils.HeadFile(fpth, text="LAYER COMPACTION")
+ sobj = flopy.utils.HeadFile(
+ fpth, text="LAYER COMPACTION", verbose=False
+ )
tc0 = sobj.get_ts((2, wrp[0], wcp[0]))
except:
assert False, f'could not load data from "{fpth}"'
@@ -424,15 +423,15 @@ def eval_zdisplacement(sim):
msg = f"maximum absolute total-compaction difference ({diffmax}) "
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# get results from listing file
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.lst")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.lst")
budl = flopy.utils.Mf6ListBudget(fpth)
names = list(bud_lst)
d0 = budl.get_budget(names=names)[0]
@@ -451,11 +450,11 @@ def eval_zdisplacement(sim):
d = np.recarray(nbud, dtype=dtype)
for key in bud_lst:
d[key] = 0.0
- fpth = os.path.join(sim.simpath, f"{os.path.basename(sim.name)}.cbc")
- cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
+ fpth = os.path.join(test.workspace, f"{os.path.basename(test.name)}.cbc")
+ cobj = flopy.utils.CellBudgetFile(fpth, precision="double", verbose=False)
kk = cobj.get_kstpkper()
times = cobj.get_times()
- for idx, (k, t) in enumerate(zip(kk, times)):
+ for i, (k, t) in enumerate(zip(kk, times)):
for text in cbc_bud:
qin = 0.0
qout = 0.0
@@ -473,60 +472,59 @@ def eval_zdisplacement(sim):
qout -= vv
else:
qin += vv
- d["totim"][idx] = t
- d["time_step"][idx] = k[0]
+ d["totim"][i] = t
+ d["time_step"][i] = k[0]
d["stress_period"] = k[1]
key = f"{text}_IN"
- d[key][idx] = qin
+ d[key][i] = qin
key = f"{text}_OUT"
- d[key][idx] = qout
+ d[key][i] = qout
diff = np.zeros((nbud, len(bud_lst)), dtype=float)
- for idx, key in enumerate(bud_lst):
- diff[:, idx] = d0[key] - d[key]
+ for i, key in enumerate(bud_lst):
+ diff[:, i] = d0[key] - d[key]
diffmax = np.abs(diff).max()
msg = f"maximum absolute total-budget difference ({diffmax}) "
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.bud.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.bud.cmp.out"
)
- f = open(fpth, "w")
- for i in range(diff.shape[0]):
- if i == 0:
- line = f"{'TIME':>10s}"
- for idx, key in enumerate(bud_lst):
- line += f"{key + '_LST':>25s}"
- line += f"{key + '_CBC':>25s}"
- line += f"{key + '_DIF':>25s}"
+ with open(fpth, "w") as f:
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for key in bud_lst:
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for ii, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, ii]:25g}"
f.write(line + "\n")
- line = f"{d['totim'][i]:10g}"
- for idx, key in enumerate(bud_lst):
- line += f"{d0[key][i]:25g}"
- line += f"{d[key][i]:25g}"
- line += f"{diff[i, idx]:25g}"
- f.write(line + "\n")
- f.close()
if diffmax > budtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {budtol}"
assert diffmax < budtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
# compare z-displacement data
fpth1 = os.path.join(
- sim.simpath,
- f"{os.path.basename(sim.name)}.zdisplacement.gridbin",
+ test.workspace,
+ f"{os.path.basename(test.name)}.zdisplacement.gridbin",
)
- fpth2 = os.path.join(sim.simpath, cmppth, "csub_zdisp01.vert_disp.hds")
+ fpth2 = os.path.join(test.workspace, cmppth, "csub_zdisp01.vert_disp.hds")
text1 = "CSUB-ZDISPLACE"
text2 = "Z DISPLACEMENT"
fout = os.path.join(
- sim.simpath,
- f"{os.path.basename(sim.name)}.z-displacement.bin.out",
+ test.workspace,
+ f"{os.path.basename(test.name)}.z-displacement.bin.out",
)
success_tst = compare_heads(
None,
@@ -542,28 +540,22 @@ def eval_zdisplacement(sim):
)
msg = f"z-displacement comparison success = {success_tst}"
if success_tst:
- sim.success = True
+ test.success = True
print(msg)
else:
- sim.success = False
+ test.success = False
assert success_tst, msg
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- sim, mc = get_model(idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_zdisplacement,
- htol=htol[idx],
- idxsim=idx,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ htol=htol[idx],
)
+ test.run()
diff --git a/autotest/test_gwf_disu.py b/autotest/test_gwf_disu.py
index 85880b95d82..c850d48f390 100644
--- a/autotest/test_gwf_disu.py
+++ b/autotest/test_gwf_disu.py
@@ -1,13 +1,26 @@
+"""
+Test of GWF DISU Package. Use the flopy disu tool to create
+a simple regular grid example, but using DISU instead of DIS.
+The first case is just a simple test. For the second case, set
+one of the cells inactive and test to make sure connectivity
+in the binary grid file is correct.
+"""
+
import os
import flopy
import numpy as np
+import pytest
from flopy.utils.gridutil import get_disu_kwargs
+from framework import TestFramework
+
+cases = ["disu01a", "disu01b"]
+
-def test_disu_simple(tmpdir, targets):
- mf6 = targets["mf6"]
- name = "disu01a"
+def build_models(idx, test):
+ name = cases[idx]
+ ws = test.workspace
nlay = 3
nrow = 3
ncol = 3
@@ -15,9 +28,26 @@ def test_disu_simple(tmpdir, targets):
delc = 10.0 * np.ones(nrow)
top = 0
botm = [-10, -20, -30]
- disukwargs = get_disu_kwargs(nlay, nrow, ncol, delr, delc, top, botm)
+ disukwargs = get_disu_kwargs(
+ nlay,
+ nrow,
+ ncol,
+ delr,
+ delc,
+ top,
+ botm,
+ )
+ if idx == 1:
+ # for the second test, set one cell to idomain = 0
+ idomain = np.ones((nlay, nrow * ncol), dtype=int)
+ idomain[0, 1] = 0
+ disukwargs["idomain"] = idomain
+
sim = flopy.mf6.MFSimulation(
- sim_name=name, version="mf6", exe_name=mf6, sim_ws=str(tmpdir)
+ sim_name=name,
+ version="mf6",
+ exe_name="mf6",
+ sim_ws=ws,
)
tdis = flopy.mf6.ModflowTdis(sim)
gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
@@ -25,67 +55,36 @@ def test_disu_simple(tmpdir, targets):
disu = flopy.mf6.ModflowGwfdisu(gwf, **disukwargs)
ic = flopy.mf6.ModflowGwfic(gwf, strt=0.0)
npf = flopy.mf6.ModflowGwfnpf(gwf)
- spd = {0: [[(0,), 1.0], [(nrow * ncol - 1,), 0.0]]}
+ spd = {0: [[(0,), 1.0], [(nrow * ncol - 1,), 0.0]]}
chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(gwf, stress_period_data=spd)
- sim.write_simulation()
- sim.run_simulation()
+ return sim, None
-def test_disu_idomain_simple(tmpdir, targets):
- mf6 = targets["mf6"]
- name = "disu01b"
- nlay = 3
- nrow = 3
- ncol = 3
- delr = 10.0 * np.ones(ncol)
- delc = 10.0 * np.ones(nrow)
- top = 0
- botm = [-10, -20, -30]
- idomain = np.ones(nlay * nrow * ncol, dtype=int)
- idomain[1] = 0
- disukwargs = get_disu_kwargs(nlay, nrow, ncol, delr, delc, top, botm)
- disukwargs["idomain"] = idomain
- sim = flopy.mf6.MFSimulation(
- sim_name=name, version="mf6", exe_name=mf6, sim_ws=str(tmpdir)
- )
- tdis = flopy.mf6.ModflowTdis(sim)
- gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
- ims = flopy.mf6.ModflowIms(sim, print_option="SUMMARY")
- disu = flopy.mf6.ModflowGwfdisu(gwf, **disukwargs)
- ic = flopy.mf6.ModflowGwfic(gwf, strt=0.0)
- npf = flopy.mf6.ModflowGwfnpf(gwf)
- spd = {0: [[(0,), 1.0], [(nrow * ncol - 1,), 0.0]]}
- chd = flopy.mf6.modflow.ModflowGwfchd(gwf, stress_period_data=spd)
- oc = flopy.mf6.modflow.ModflowGwfoc(
- gwf,
- budget_filerecord=f"{name}.bud",
- head_filerecord=f"{name}.hds",
- saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
- )
- sim.write_simulation()
- sim.run_simulation()
+def check_output(idx, test):
+ name = test.name
- # check binary grid file
- fname = os.path.join(str(tmpdir), name + ".disu.grb")
+ fname = os.path.join(test.workspace, name + ".disu.grb")
grbobj = flopy.mf6.utils.MfGrdFile(fname)
nodes = grbobj._datadict["NODES"]
ia = grbobj._datadict["IA"]
ja = grbobj._datadict["JA"]
- assert nodes == disukwargs["nodes"]
- assert np.array_equal(ia[0:4], np.array([1, 4, 4, 7]))
- assert np.array_equal(ja[:6], np.array([1, 4, 10, 3, 6, 12]))
- assert ia[-1] == 127
- assert ia.shape[0] == 28, "ia should have size of 28"
- assert ja.shape[0] == 126, "ja should have size of 126"
- # load head array and ensure nodata value in second cell
- fname = os.path.join(str(tmpdir), name + ".hds")
- hdsobj = flopy.utils.HeadFile(fname)
- head = hdsobj.get_alldata().flatten()
- assert head[1] == 1.0e30
+ if idx == 1:
+ assert np.array_equal(ia[0:4], np.array([1, 4, 4, 7]))
+ assert np.array_equal(ja[:6], np.array([1, 4, 10, 3, 6, 12]))
+ assert ia[-1] == 127
+ assert ia.shape[0] == 28, "ia should have size of 28"
+ assert ja.shape[0] == 126, "ja should have size of 126"
- # load flowja to make sure it is the right size
- fname = os.path.join(str(tmpdir), name + ".bud")
- budobj = flopy.utils.CellBudgetFile(fname, precision="double")
- flowja = budobj.get_data(text="FLOW-JA-FACE")[0].flatten()
- assert flowja.shape[0] == 126
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare=None,
+ )
+ test.run()
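
For reference, the connectivity check added above reads the DISU binary grid file directly. A condensed sketch of that read, using the same MfGrdFile access pattern as the test (the file name is a hypothetical placeholder):

import flopy
import numpy as np

grb = flopy.mf6.utils.MfGrdFile("model.disu.grb")
ia = grb._datadict["IA"]  # CRS row pointers (1-based), length nodes + 1
ja = grb._datadict["JA"]  # CRS list of connected nodes (1-based)

# number of connections for 0-based cell n is ia[n + 1] - ia[n]; an inactive
# cell contributes an empty row, e.g. ia[1] == ia[2] in the idomain case above
nconn = np.diff(ia)
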
diff --git a/autotest/test_gwf_disv.py b/autotest/test_gwf_disv.py
new file mode 100644
index 00000000000..31b08cba952
--- /dev/null
+++ b/autotest/test_gwf_disv.py
@@ -0,0 +1,96 @@
+"""
+Test of GWF DISV Package. Use the flopy disv tool to create
+a simple regular grid example, but using DISV instead of DIS.
+Use a large offset for x and y vertices to ensure that the area
+calculation in MODFLOW 6 is correct. For the second case, set
+one of the cells inactive and test to make sure connectivity
+in the binary grid file is correct.
+"""
+
+import os
+
+import flopy
+import numpy as np
+import pytest
+from flopy.utils.gridutil import get_disv_kwargs
+
+from framework import TestFramework
+
+cases = ["disv01a", "disv01b"]
+
+
+def build_models(idx, test):
+ name = cases[idx]
+ ws = test.workspace
+ nlay = 3
+ nrow = 3
+ ncol = 3
+ delr = 10.0
+ delc = 10.0
+ top = 0
+ botm = [-10, -20, -30]
+ xoff = 100000000.0
+ yoff = 100000000.0
+ disvkwargs = get_disv_kwargs(
+ nlay,
+ nrow,
+ ncol,
+ delr,
+ delc,
+ top,
+ botm,
+ xoff,
+ yoff,
+ )
+ if idx == 1:
+ # for the second test, set one cell to idomain = 0
+ idomain = np.ones((nlay, nrow * ncol), dtype=int)
+ idomain[0, 1] = 0
+ disvkwargs["idomain"] = idomain
+
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name,
+ version="mf6",
+ exe_name="mf6",
+ sim_ws=ws,
+ )
+ tdis = flopy.mf6.ModflowTdis(sim)
+ gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
+ ims = flopy.mf6.ModflowIms(sim, print_option="SUMMARY")
+ disv = flopy.mf6.ModflowGwfdisv(gwf, **disvkwargs)
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=0.0)
+ npf = flopy.mf6.ModflowGwfnpf(gwf)
+ spd = {0: [[(0, 0), 1.0], [(0, nrow * ncol - 1), 0.0]]}
+ chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(gwf, stress_period_data=spd)
+ return sim, None
+
+
+def check_output(idx, test):
+ name = test.name
+
+ fname = os.path.join(test.workspace, name + ".disv.grb")
+ grbobj = flopy.mf6.utils.MfGrdFile(fname)
+ ncpl = grbobj._datadict["NCPL"]
+ ia = grbobj._datadict["IA"]
+ ja = grbobj._datadict["JA"]
+
+ if idx == 1:
+ # assert ncpl == disvkwargs["ncpl"]
+ assert np.array_equal(ia[0:4], np.array([1, 4, 4, 7]))
+ assert np.array_equal(ja[:6], np.array([1, 4, 10, 3, 6, 12]))
+ assert ia[-1] == 127
+ assert ia.shape[0] == 28, "ia should have size of 28"
+ assert ja.shape[0] == 126, "ja should have size of 126"
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare=None,
+ )
+ test.run()
diff --git a/autotest/test_gwf_disv_uzf.py b/autotest/test_gwf_disv_uzf.py
index cf89372267d..e681c041df1 100644
--- a/autotest/test_gwf_disv_uzf.py
+++ b/autotest/test_gwf_disv_uzf.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
A test of DISV with UZF. Originally created due to a possible bug in the
ASCII output file generated by UZF. Uses quadrilateral cells. The cells
are created from a numpy grid with cells that are 1m x 1m. Although a DISV
@@ -14,97 +13,37 @@
import flopy.utils.cvfdutil
import numpy as np
import pytest
+from flopy.utils.gridutil import get_disv_kwargs
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["disv_with_uzf"]
+cases = ["disv_with_uzf"]
nlay = 5
+nrow = 10
+ncol = 10
+ncpl = nrow * ncol
+delr = 1.0
+delc = 1.0
nper = 5
perlen = [10] * 5
nstp = [5] * 5
tsmult = len(perlen) * [1.0]
+top = 25.0
botm = [20.0, 15.0, 10.0, 5.0, 0.0]
strt = 20
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-9, 1e-3, 0.97
-ghb_ids = []
-
-
-def create_disv_mesh():
- # Create a grid of verts
- nx, ny = (11, 11)
- x = np.linspace(0, 10, nx)
- y = np.linspace(0, 10, ny)
- xv, yv = np.meshgrid(x, y)
- yv = np.flipud(yv)
-
- verts = []
- vid = 0
- vert_lkup = {}
- for i in yv[:, 0]:
- for j in xv[0, :]:
- vert_lkup.update({(float(j), float(i)): vid})
- verts.append([int(vid), float(j), float(i)])
- vid += 1
-
- ivert = []
- ivid = 0
- xyverts = []
- xc, yc = [], [] # for storing the cell center location
- for i in yv[:-1, 0]:
- for j in xv[0, :-1]:
- xlst, ylst = [], []
- vid_lst = []
- # Start with upper-left corner and go clockwise
- for ct in [0, 1, 2, 3]:
- if ct == 0:
- iadj = 0.0
- jadj = 0.0
- elif ct == 1:
- iadj = 0.0
- jadj = 1.0
- elif ct == 2:
- iadj = -1.0
- jadj = 1.0
- elif ct == 3:
- iadj = -1.0
- jadj = 0.0
-
- vid = vert_lkup[(float(j + jadj), float(i + iadj))]
- vid_lst.append(vid)
-
- xlst.append(float(j + jadj))
- ylst.append(float(i + iadj))
-
- xc.append(np.mean(xlst))
- yc.append(np.mean(ylst))
- xyverts.append(list(zip(xlst, ylst)))
-
- rec = [ivid] + vid_lst
- ivert.append(rec)
-
- # if ivert part of right boundary, store id
- if j == 9.0:
- ghb_ids.append(ivid)
-
- ivid += 1
-
- # finally, create a cell2d record
- cell2d = []
- for ix, iv in enumerate(ivert):
- xvt, yvt = np.array(xyverts[ix]).T
- if flopy.utils.geometry.is_clockwise(xvt, yvt):
- rec = [iv[0], xc[ix], yc[ix], len(iv[1:])] + iv[1:]
- else:
- iiv = iv[1:][::-1]
- rec = [iv[0], xc[ix], yc[ix], len(iiv)] + iiv
-
- cell2d.append(rec)
-
- return verts, cell2d
-
-verts, cell2d = create_disv_mesh()
+# use flopy util to get disv arguments
+disvkwargs = get_disv_kwargs(
+ nlay,
+ nrow,
+ ncol,
+ delr,
+ delc,
+ top,
+ botm,
+)
# Work up UZF data
iuzno = 0
@@ -117,7 +56,7 @@ def create_disv_mesh():
eps = 3.5
for k in np.arange(nlay):
- for i in np.arange(0, len(cell2d), 1):
+ for i in np.arange(0, ncpl, 1):
if k == 0:
landflg = 1
surfdp = 0.25
@@ -128,7 +67,7 @@ def create_disv_mesh():
if k == nlay - 1:
ivertcon = -1
else:
- ivertcon = iuzno + len(cell2d)
+ ivertcon = iuzno + ncpl
bndnm = "uzf" + "{0:03d}".format(int(i + 1))
uzf_pkdat.append(
@@ -160,7 +99,7 @@ def create_disv_mesh():
spd = []
iuzno = 0
for k in np.arange(nlay):
- for i in np.arange(0, len(cell2d), 1):
+ for i in np.arange(0, ncpl, 1):
if k == 0:
if t == 0:
finf = 0.15
@@ -180,6 +119,7 @@ def create_disv_mesh():
# Work up the GHB boundary
+ghb_ids = [(ncol - 1) + i * ncol for i in range(nrow)]
ghb_spd = []
cond = 1e4
for k in np.arange(3, 5, 1):
@@ -187,12 +127,11 @@ def create_disv_mesh():
ghb_spd.append([(k, i), 14.0, cond])
-def build_model(idx, dir):
-
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -230,18 +169,8 @@ def build_model(idx, dir):
)
sim.register_ims_package(ims, [gwf.name])
- ncpl = len(cell2d)
- nvert = len(verts)
- disv = flopy.mf6.ModflowGwfdisv(
- gwf,
- nlay=nlay,
- ncpl=ncpl,
- nvert=nvert,
- top=25.0,
- botm=botm,
- vertices=verts,
- cell2d=cell2d,
- )
+ # disv
+ disv = flopy.mf6.ModflowGwfdisv(gwf, **disvkwargs)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
@@ -313,17 +242,15 @@ def build_model(idx, dir):
return sim, None
-def eval_model(sim):
- print("evaluating model...")
-
+def check_output(idx, test):
# Next, get the binary printed heads
- fpth = os.path.join(sim.simpath, sim.name + ".hds")
+ fpth = os.path.join(test.workspace, test.name + ".hds")
hobj = flopy.utils.HeadFile(fpth, precision="double")
hds = hobj.get_alldata()
hds = hds.reshape((np.sum(nstp), 5, 10, 10))
# Get the MF6 cell-by-cell fluxes
- bpth = os.path.join(sim.simpath, sim.name + ".cbc")
+ bpth = os.path.join(test.workspace, test.name + ".cbc")
bobj = flopy.utils.CellBudgetFile(bpth, precision="double")
bobj.get_unique_record_names()
# ' STO-SS'
@@ -341,7 +268,7 @@ def eval_model(sim):
gwet = gwetv.reshape((np.sum(nstp), 5, 10, 10))
# Also retrieve the binary UZET output
- uzpth = os.path.join(sim.simpath, sim.name + ".uzf.bud")
+ uzpth = os.path.join(test.workspace, test.name + ".uzf.bud")
uzobj = flopy.utils.CellBudgetFile(uzpth, precision="double")
uzobj.get_unique_record_names()
# b' FLOW-JA-FACE',
@@ -389,7 +316,7 @@ def eval_model(sim):
for rw in np.arange(arr.shape[0]):
fullrw = arr[rw]
for cl in np.arange(len(fullrw) - 1):
- assert abs(fullrw[cl]) >= abs(fullrw[cl + 1]), (
+ assert abs(fullrw[cl]) + 0.01 >= abs(fullrw[cl + 1]), (
"gwet not decreasing to the right as expected. Stress Period: "
+ str(tm + 1)
+ "; Row: "
@@ -423,7 +350,7 @@ def eval_model(sim):
for rw in np.arange(arr.shape[0]):
fullrw = arr[rw]
for cl in np.arange(len(fullrw) - 1):
- assert abs(fullrw[cl]) <= abs(fullrw[cl + 1]), (
+ assert abs(fullrw[cl]) <= abs(fullrw[cl + 1]) + 0.01, (
"gwet not decreasing to the right as expected. Stress Period: "
+ str(tm + 1)
+ "; Row: "
@@ -454,13 +381,13 @@ def eval_model(sim):
@pytest.mark.slow
-@pytest.mark.parametrize("name", ex)
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_model, idxsim=0
- ),
- str(function_tmpdir),
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
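
The hand-rolled vertex and cell2d construction removed above is replaced by flopy's grid utility. A minimal sketch of that replacement, using only the positional get_disv_kwargs call and the DISV grid values appearing in this patch (the simulation name is a hypothetical placeholder):

import flopy
from flopy.utils.gridutil import get_disv_kwargs

# regular 10 x 10 grid of 1 m cells with 5 layers, as in test_gwf_disv_uzf.py
nlay, nrow, ncol = 5, 10, 10
top = 25.0
botm = [20.0, 15.0, 10.0, 5.0, 0.0]
disvkwargs = get_disv_kwargs(nlay, nrow, ncol, 1.0, 1.0, top, botm)

sim = flopy.mf6.MFSimulation(sim_name="disv_sketch", sim_ws=".")
tdis = flopy.mf6.ModflowTdis(sim)
gwf = flopy.mf6.ModflowGwf(sim, modelname="disv_sketch")
ims = flopy.mf6.ModflowIms(sim)
disv = flopy.mf6.ModflowGwfdisv(gwf, **disvkwargs)
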
diff --git a/autotest/test_gwf_drn_ddrn01.py b/autotest/test_gwf_drn_ddrn01.py
index 7814053e45c..881a2c74622 100644
--- a/autotest/test_gwf_drn_ddrn01.py
+++ b/autotest/test_gwf_drn_ddrn01.py
@@ -3,12 +3,12 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
+cases = ["drn_ddrn01a", "drn_ddrn01b"]
paktest = "drn"
budtol = 1e-2
-ex = ["drn_ddrn01a", "drn_ddrn01b"]
ddir = "data"
newton = [False, True]
@@ -46,10 +46,10 @@ def initial_conditions():
return np.sqrt(h0**2 + x * (h1**2 - h0**2) / (xlen - delr))
-def get_model(idxsim, ws, name):
+def get_model(idx, ws, name):
strt = initial_conditions()
hdsfile = f"{name}.hds"
- if newton[idxsim]:
+ if newton[idx]:
newtonoptions = "NEWTON"
else:
newtonoptions = None
@@ -111,28 +111,26 @@ def get_model(idxsim, ws, name):
return sim
-def build_model(idx, dir):
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = get_model(idx, ws, name)
return sim, None
-def eval_disch(sim):
- print("evaluating drain discharge...")
-
+def check_output(idx, test):
# MODFLOW 6 drain discharge results
- fpth = os.path.join(sim.simpath, "drn_obs.csv")
+ fpth = os.path.join(test.workspace, "drn_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW 6 head results
- fpth = os.path.join(sim.simpath, "head_obs.csv")
+ fpth = os.path.join(test.workspace, "head_obs.csv")
try:
th0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -140,7 +138,7 @@ def eval_disch(sim):
# calculate the drain flux analytically
xdiff = th0["H1_1_100"] - delev
- f = drain_smoothing(xdiff, ddrn, newton=newton[sim.idxsim])
+ f = drain_smoothing(xdiff, ddrn, newton=newton[idx])
tc0 = f * dcond * (delev - th0["H1_1_100"])
# calculate maximum absolute error
@@ -151,32 +149,29 @@ def eval_disch(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.disc.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.disc.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'DRN':>15s}"
- line += f" {'UZF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc['time'][i]:15g}"
- line += f" {tc['D1_1_100'][i]:15g}"
- line += f" {tc0[i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'DRN':>15s}"
+ line += f" {'UZF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc['time'][i]:15g}"
+ line += f" {tc['D1_1_100'][i]:15g}"
+ line += f" {tc0[i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
- return
-
def drain_smoothing(xdiff, xrange, newton=False):
sat = xdiff / xrange
@@ -191,16 +186,13 @@ def drain_smoothing(xdiff, xrange, newton=False):
return f
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_disch, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_drn_ddrn02.py b/autotest/test_gwf_drn_ddrn02.py
index 6f4cca16c45..f69068b235f 100644
--- a/autotest/test_gwf_drn_ddrn02.py
+++ b/autotest/test_gwf_drn_ddrn02.py
@@ -3,12 +3,12 @@
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
+cases = ["drn_ddrn02a"]
paktest = "drn"
budtol = 1e-2
-ex = ["drn_ddrn02a"]
# static model data
# spatial discretization
@@ -108,32 +108,30 @@ def get_model(ws, name, uzf=False):
return sim
-def build_model(idx, dir):
- name = ex[idx]
+def build_models(idx, test):
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = get_model(ws, name)
# build MODFLOW 6 files with UZF package
- ws = os.path.join(dir, "mf6")
+ ws = os.path.join(test.workspace, "mf6")
mc = get_model(ws, name, uzf=True)
return sim, mc
-def eval_disch(sim):
- print("evaluating drain discharge and uzf discharge to land surface...")
-
+def check_output(idx, test):
# MODFLOW 6 drain discharge results
- fpth = os.path.join(sim.simpath, "drn_obs.csv")
+ fpth = os.path.join(test.workspace, "drn_obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
# MODFLOW 6 uzf discharge results
- fpth = os.path.join(sim.simpath, "mf6", "uzf_obs.csv")
+ fpth = os.path.join(test.workspace, "mf6", "uzf_obs.csv")
try:
tc0 = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -147,44 +145,37 @@ def eval_disch(sim):
# write summary
fpth = os.path.join(
- sim.simpath, f"{os.path.basename(sim.name)}.disc.cmp.out"
+ test.workspace, f"{os.path.basename(test.name)}.disc.cmp.out"
)
- f = open(fpth, "w")
- line = f"{'TOTIM':>15s}"
- line += f" {'DRN':>15s}"
- line += f" {'UZF':>15s}"
- line += f" {'DIFF':>15s}"
- f.write(line + "\n")
- for i in range(diff.shape[0]):
- line = f"{tc0['time'][i]:15g}"
- line += f" {tc['D1_1_1'][i]:15g}"
- line += f" {tc0['D1_1_1'][i]:15g}"
- line += f" {diff[i]:15g}"
+ with open(fpth, "w") as f:
+ line = f"{'TOTIM':>15s}"
+ line += f" {'DRN':>15s}"
+ line += f" {'UZF':>15s}"
+ line += f" {'DIFF':>15s}"
f.write(line + "\n")
- f.close()
+ for i in range(diff.shape[0]):
+ line = f"{tc0['time'][i]:15g}"
+ line += f" {tc['D1_1_1'][i]:15g}"
+ line += f" {tc0['D1_1_1'][i]:15g}"
+ line += f" {diff[i]:15g}"
+ f.write(line + "\n")
if diffmax > dtol:
- sim.success = False
+ test.success = False
msg += f"exceeds {dtol}"
assert diffmax < dtol, msg
else:
- sim.success = True
+ test.success = True
print(" " + msg)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=str(function_tmpdir),
- exe_dict=targets,
- exfunc=eval_disch,
- idxsim=idx,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
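
All of the autotest modules converted in this change set follow the same wiring: build_models(idx, test) writes one or two flopy simulations into test.workspace and returns them, check_output(idx, test) asserts on the files produced there, and TestFramework.run() builds, runs, optionally compares, and checks. A condensed sketch of that pattern, assembled from the diffs in this change set rather than copied from any single module (the case name is hypothetical), is:

    import pytest
    from framework import TestFramework

    cases = ["example_case"]  # hypothetical case name

    def build_models(idx, test):
        # build and return (simulation, optional comparison simulation)
        sim = ...  # e.g. a flopy.mf6.MFSimulation with sim_ws=test.workspace
        return sim, None

    def check_output(idx, test):
        # read output files from test.workspace and assert on them
        ...

    @pytest.mark.parametrize("idx, name", enumerate(cases))
    def test_mf6model(idx, name, function_tmpdir, targets):
        test = TestFramework(
            name=name,
            workspace=function_tmpdir,
            targets=targets,
            build=lambda t: build_models(idx, t),
            check=lambda t: check_output(idx, t),
        )
        test.run()
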
diff --git a/autotest/test_gwf_errors.py b/autotest/test_gwf_errors.py
index c7ba06ea3b2..2d23c2fd31b 100644
--- a/autotest/test_gwf_errors.py
+++ b/autotest/test_gwf_errors.py
@@ -1,9 +1,7 @@
"""
-MODFLOW 6 Autotest
Test to make sure that mf6 is failing with the correct error messages. This
test script is set up to be extensible so that simple models can be created
very easily and tested with different options to succeed or fail correctly.
-
"""
import subprocess
@@ -110,7 +108,7 @@ def get_minimal_gwf_simulation(
def test_simple_model_success(function_tmpdir, targets):
- mf6 = targets.mf6
+ mf6 = targets["mf6"]
# test a simple model to make sure it runs and terminates correctly
sim = get_minimal_gwf_simulation(str(function_tmpdir), mf6)
@@ -124,7 +122,7 @@ def test_simple_model_success(function_tmpdir, targets):
def test_empty_folder(function_tmpdir, targets):
- mf6 = targets.mf6
+ mf6 = targets["mf6"]
with pytest.raises(RuntimeError):
# make sure mf6 fails when there is no simulation name file
err_str = "mf6: mfsim.nam is not present in working directory."
@@ -132,7 +130,7 @@ def test_empty_folder(function_tmpdir, targets):
def test_sim_errors(function_tmpdir, targets):
- mf6 = targets.mf6
+ mf6 = targets["mf6"]
with pytest.raises(RuntimeError):
# verify that the correct number of errors are reported
@@ -149,7 +147,7 @@ def test_sim_errors(function_tmpdir, targets):
def test_sim_maxerrors(function_tmpdir, targets):
- mf6 = targets.mf6
+ mf6 = targets["mf6"]
with pytest.raises(RuntimeError):
# verify that the maxerrors keyword gives the correct error output
@@ -176,7 +174,7 @@ def test_sim_maxerrors(function_tmpdir, targets):
def test_disu_errors(function_tmpdir, targets):
- mf6 = targets.mf6
+ mf6 = targets["mf6"]
with pytest.raises(RuntimeError):
disukwargs = get_disu_kwargs(
@@ -204,7 +202,7 @@ def test_disu_errors(function_tmpdir, targets):
def test_solver_fail(function_tmpdir, targets):
- mf6 = targets.mf6
+ mf6 = targets["mf6"]
with pytest.raises(RuntimeError):
# test failed to converge
@@ -221,7 +219,7 @@ def test_solver_fail(function_tmpdir, targets):
def test_fail_continue_success(function_tmpdir, targets):
- mf6 = targets.mf6
+ mf6 = targets["mf6"]
# test continue but failed to converge
tdiskwargs = {"nper": 1, "perioddata": [(10.0, 10, 1.0)]}
diff --git a/autotest/test_gwf_evt01.py b/autotest/test_gwf_evt01.py
index d90d6ccfed5..33b4ca458c2 100644
--- a/autotest/test_gwf_evt01.py
+++ b/autotest/test_gwf_evt01.py
@@ -3,14 +3,13 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
-from simulation import TestSimulation
-ex = ["evt01"]
+from framework import TestFramework
+cases = ["evt01"]
-def build_model(idx, dir):
+def build_models(idx, test):
nlay, nrow, ncol = 1, 1, 3
chdheads = list(np.linspace(1, 100))
nper = len(chdheads)
@@ -28,10 +27,10 @@ def build_model(idx, dir):
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
@@ -153,14 +152,12 @@ def etfunc(h, qmax, surf, exdp, petm, pxdp, petm0=1.0):
return q, hcof, rhs
-def eval_model(sim):
- print("evaluating model...")
-
- fpth = os.path.join(sim.simpath, "evt01.cbc")
+def check_output(idx, test):
+ fpth = os.path.join(test.workspace, "evt01.cbc")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="evt")
- fpth = os.path.join(sim.simpath, "evt01.hds")
+ fpth = os.path.join(test.workspace, "evt01.hds")
hobj = flopy.utils.HeadFile(fpth, precision="double")
heads = hobj.get_alldata()
@@ -177,16 +174,13 @@ def eval_model(sim):
assert np.allclose(sim_evt_rate, cal_evt_rate), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_model, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_evt02.py b/autotest/test_gwf_evt02.py
index ad1191eb7cc..a8b6adebfbb 100644
--- a/autotest/test_gwf_evt02.py
+++ b/autotest/test_gwf_evt02.py
@@ -3,14 +3,13 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
-from simulation import TestSimulation
-ex = ["evt02"]
+from framework import TestFramework
+cases = ["evt02"]
-def build_model(idx, dir, exe):
+def build_models(idx, test):
nlay, nrow, ncol = 1, 1, 3
chdheads = list(np.linspace(1, 100))
nper = len(chdheads)
@@ -28,12 +27,12 @@ def build_model(idx, dir, exe):
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
- sim_name=name, version="mf6", exe_name=exe, sim_ws=ws
+ sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
@@ -151,25 +150,19 @@ def etfunc(h, qmax, surf, exdp, petm, pxdp, petm0=1.0):
return q, hcof, rhs
-def eval_model(sim):
- print("evaluating model...")
-
+def check_output(idx, test):
# The nature of the bug is that the model crashes with nseg=1
- fpth = os.path.join(sim.simpath, "evt02.cbc")
+ fpth = os.path.join(test.workspace, "evt02.cbc")
assert os.path.isfile(fpth), "model did not run with nseg=1 in EVT input"
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- mf6 = targets["mf6"]
- test.build(lambda i, w: build_model(i, w, mf6), idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_model, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_exgmvr01.py b/autotest/test_gwf_exgmvr01.py
new file mode 100644
index 00000000000..131a7c72af0
--- /dev/null
+++ b/autotest/test_gwf_exgmvr01.py
@@ -0,0 +1,309 @@
+"""
+Based on the sft01 gwf model, but split into two gwf models to test the
+gwf-gwf exchange and mvr. The single model is run as the regression model.
+
+The final split model looks like:
+
+ flow1 flow2
+ sfr 1 2 3 4 5 6 7 gwfgwf-mvr => 1 2 3 4 5 6 7
+ ------------- -------------
+ gwf 1 2 3 4 5 6 7 gwfgwf => 1 2 3 4 5 6 7
+"""
+
+
+import flopy
+import numpy as np
+import pytest
+
+from framework import TestFramework
+
+cases = ["gwf_exgmvr01"]
+
+# properties for single model combination
+lx = 14.0
+lz = 1.0
+nlay = 1
+nrow = 1
+ncol = 14
+nper = 1
+delc = 1.0
+delr = lx / ncol
+delz = lz / nlay
+top = 0.0
+botm = [top - (k + 1) * delz for k in range(nlay)]
+Kh = 20.0
+Kv = 20.0
+
+
+def build_simulation(idx, sim_ws, sim_type="single"):
+ name = cases[idx]
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name,
+ sim_ws=sim_ws,
+ )
+
+ tdis = flopy.mf6.ModflowTdis(
+ sim,
+ time_units="DAYS",
+ nper=nper,
+ )
+
+ # Flow solver
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ complexity="simple",
+ print_option="ALL",
+ outer_dvclose=1e-6,
+ inner_dvclose=1e-6,
+ )
+
+ if sim_type == "single":
+ gwf_types = ("single",)
+ else:
+ gwf_types = ("left", "right")
+ for gwf_type in gwf_types:
+ gwf = build_gwf(sim, gwf_type=gwf_type)
+
+ if sim_type != "single":
+ build_exchanges(sim)
+
+ return sim
+
+
+def build_gwf(sim, gwf_type="single"):
+ if gwf_type == "single":
+ nc = ncol
+ else:
+ nc = int(ncol / 2)
+
+ # create gwf model
+ gwf = flopy.mf6.ModflowGwf(
+ sim,
+ modelname=gwf_type,
+ save_flows=True,
+ )
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=nc,
+ delr=delr,
+ delc=delc,
+ top=top,
+ botm=botm,
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=0.0)
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_specific_discharge=True,
+ icelltype=0,
+ k=Kh,
+ k33=Kv,
+ )
+
+ # add chd to right edge
+ if gwf_type in ("single", "right"):
+ chdlist = [
+ [(0, 0, nc - 1), 0.0],
+ ]
+ chd = flopy.mf6.ModflowGwfchd(
+ gwf,
+ stress_period_data=chdlist,
+ pname="chd_right",
+ )
+
+ # inject water into left edge
+ if gwf_type in ("single", "left"):
+ wellist = [
+ [(0, 0, 0), 1.0],
+ ]
+ wel = flopy.mf6.ModflowGwfwel(
+ gwf,
+ stress_period_data=wellist,
+ pname="well_left",
+ )
+
+    # sfr reach properties and package data
+ rlen = delr
+ rwid = delc
+ rgrd = 1.0
+ rtp = 0.0
+ rbth = 0.1
+ rhk = 0.01
+ rman = 1.0
+ ustrf = 1.0
+ ndv = 0
+ pak_data = []
+ for irno in range(nc):
+ ncon = 2
+ if irno in [0, nc - 1]:
+ ncon = 1
+ cellid = (0, 0, irno)
+ t = (
+ irno,
+ cellid,
+ rlen,
+ rwid,
+ rgrd,
+ rtp,
+ rbth,
+ rhk,
+ rman,
+ ncon,
+ ustrf,
+ ndv,
+ )
+ pak_data.append(t)
+
+ con_data = []
+ for irno in range(nc):
+ if irno == 0:
+ t = (irno, -(irno + 1))
+ elif irno == nc - 1:
+ t = (irno, irno - 1)
+ else:
+ t = (irno, irno - 1, -(irno + 1))
+ con_data.append(t)
+
+ if gwf_type in ("single", "left"):
+ p_data = [
+ (0, "INFLOW", 1.0),
+ ]
+ else:
+ p_data = None
+
+ if gwf_type != "single":
+ mover = True
+ else:
+ mover = None
+
+ sfr = flopy.mf6.modflow.ModflowGwfsfr(
+ gwf,
+ save_flows=True,
+ print_input=True,
+ print_flows=True,
+ print_stage=True,
+ mover=mover,
+ stage_filerecord=f"{gwf_type}.sfr.stg",
+ budget_filerecord=f"{gwf_type}.sfr.bud",
+ nreaches=nc,
+ packagedata=pak_data,
+ connectiondata=con_data,
+ perioddata=p_data,
+ pname=f"sfr_{gwf_type}",
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{gwf_type}.cbc",
+ head_filerecord=f"{gwf_type}.hds",
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ printrecord=[("BUDGET", "LAST")],
+ )
+
+ return gwf
+
+
+def build_exchanges(sim):
+ # add a gwf-gwf exchange
+ gwfgwf_data = [
+ (
+ (0, 0, int(ncol / 2) - 1),
+ (0, 0, 0),
+ 1,
+ delr / 2.0,
+ delr / 2.0,
+ delc,
+ 0.0,
+ delr,
+ )
+ ]
+
+ # GWF-GWF
+ mvr_filerecord = "left-right.exg.mvr"
+ gwfgwf = flopy.mf6.ModflowGwfgwf(
+ sim,
+ exgtype="GWF6-GWF6",
+ nexg=len(gwfgwf_data),
+ exgmnamea="left",
+ exgmnameb="right",
+ exchangedata=gwfgwf_data,
+ auxiliary=["ANGLDEGX", "CDIST"],
+ dev_interfacemodel_on=False,
+ filename="left-right.exg",
+ )
+
+ # simulation GWF-GWF Mover
+ maxmvr, maxpackages = 1, 2
+ mvrpack_sim = [["left", "sfr_left"], ["right", "sfr_right"]]
+ mvrspd = [
+ [
+ "left",
+ "sfr_left",
+ int(ncol / 2) - 1,
+ "right",
+ "sfr_right",
+ 0,
+ "FACTOR",
+ 1.00,
+ ]
+ ]
+
+ gwfgwf.mvr.initialize(
+ modelnames=True,
+ maxmvr=maxmvr,
+ print_flows=True,
+ maxpackages=maxpackages,
+ packages=mvrpack_sim,
+ perioddata=mvrspd,
+ filename=mvr_filerecord,
+ )
+
+
+def build_models(idx, test):
+ sim_ws = test.workspace / "mf6"
+ sim_base = build_simulation(idx, sim_ws)
+ sim = build_simulation(idx, test.workspace, sim_type="split")
+ return sim, sim_base
+
+
+def check_output(idx, test):
+    # base simulation's stage
+ ws = test.workspace
+ fpth = ws / "mf6/single.sfr.stg"
+ single_stage_obj = flopy.utils.HeadFile(fpth, text="STAGE")
+ single_stage = single_stage_obj.get_data().squeeze()
+
+ stage = single_stage.copy()
+
+ i1 = int(ncol / 2)
+ fpth = ws / "left.sfr.stg"
+ stage_obj = flopy.utils.HeadFile(fpth, text="STAGE")
+ v = stage_obj.get_data().squeeze()
+ stage[:i1] = v[:]
+
+ fpth = ws / "right.sfr.stg"
+ stage_obj = flopy.utils.HeadFile(fpth, text="STAGE")
+ v = stage_obj.get_data().squeeze()
+ stage[i1:] = v[:]
+
+ assert np.allclose(single_stage, stage), "sfr stages are not equal"
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare=None,
+ )
+ test.run()
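
The check_output function above reads binary SFR stage output; for reference, the underlying flopy calls reduce to the following minimal sketch (the paths mirror the stage_filerecord names used in this test and are otherwise hypothetical):

    import flopy
    import numpy as np

    # SFR stages written via stage_filerecord are read back with HeadFile
    single = flopy.utils.HeadFile("mf6/single.sfr.stg", text="STAGE").get_data().squeeze()
    left = flopy.utils.HeadFile("left.sfr.stg", text="STAGE").get_data().squeeze()
    right = flopy.utils.HeadFile("right.sfr.stg", text="STAGE").get_data().squeeze()

    # the split models laid end to end should reproduce the single-model stages
    assert np.allclose(single, np.concatenate([left, right]))
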
diff --git a/autotest/test_gwf_exgmvr02.py b/autotest/test_gwf_exgmvr02.py
new file mode 100644
index 00000000000..30ea52f079c
--- /dev/null
+++ b/autotest/test_gwf_exgmvr02.py
@@ -0,0 +1,355 @@
+"""
+Test that the exchange mover functionality works both ways,
+also in parallel. Use the following setup of two connected
+DIS models with a stream flow crossing the boundary twice:
+
+
+ left: right:
+ . . . . . . . . . . 1
+ sfr_in => x x x x x x x x x x 2
+ . . . . . gwfgwf . . . . x 3
+sfr_out <= x x x x x x x x x x 4
+ . . . . . . . . . . 5
+ 1 2 3 4 5 6 7 8 9 10
+
+The "single" model is also constructed as a reference.
+"""
+
+
+import flopy
+import numpy as np
+import pytest
+
+from framework import TestFramework
+
+cases = ["gwf_exgmvr02"]
+
+nper = 1
+
+lx = 10.0
+ly = 5.0
+lz = 1.0
+nlay = 1
+nrow = 5
+ncol = 10
+ncol_split = int(ncol / 2)
+nper = 1
+delc = ly / nrow
+delr = lx / ncol
+delz = lz / nlay
+top = 0.0
+botm = [top - (k + 1) * delz for k in range(nlay)]
+Kh = 20.0
+Kv = 20.0
+
+
+def make_sfr_data(sfr_cells, ireach_offset=0):
+ """generate package and connection data for a string of connected cells"""
+
+ pak_data = []
+ con_data = []
+
+ rlen = delr
+ rwid = delc
+ rgrd = 1.0
+ rtp = 0.0
+ rbth = 0.1
+ rhk = 0.01
+ rman = 1.0
+ ustrf = 1.0
+ ndv = 0
+ nc = len(sfr_cells)
+ for ridx, cellid in enumerate(sfr_cells):
+ irno = ridx + ireach_offset
+ ncon = 2
+ if ridx in [0, nc - 1]:
+ ncon = 1
+ t = (
+ irno,
+ cellid,
+ rlen,
+ rwid,
+ rgrd,
+ rtp,
+ rbth,
+ rhk,
+ rman,
+ ncon,
+ ustrf,
+ ndv,
+ )
+ pak_data.append(t)
+
+ if ridx == 0: # first one only connected to the next
+ c = (irno, -(irno + 1))
+ elif ridx == nc - 1: # last one only connected to the prev
+ c = (irno, irno - 1)
+ else: # connect upstream and downstream
+ c = (irno, irno - 1, -(irno + 1))
+ con_data.append(c)
+
+ return pak_data, con_data
+
+
+def build_gwf(sim, gwf_type="single"):
+ if gwf_type == "single":
+ nc = ncol
+ else: # left or right
+ nc = int(ncol / 2)
+
+ # create gwf model
+ gwf = flopy.mf6.ModflowGwf(
+ sim,
+ modelname=gwf_type,
+ save_flows=True,
+ )
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=nc,
+ delr=delr,
+ delc=delc,
+ top=top,
+ botm=botm,
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=0.0)
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_specific_discharge=True,
+ icelltype=0,
+ k=Kh,
+ k33=Kv,
+ )
+
+ # add chd to right edge
+ if gwf_type in ("single", "right"):
+ chdlist = [[(0, irow, nc - 1), 0.0] for irow in range(nrow)]
+ chd = flopy.mf6.ModflowGwfchd(
+ gwf,
+ stress_period_data=chdlist,
+ pname="chd_right",
+ )
+
+ left_sfr1 = [(0, 1, icol) for icol in range(ncol_split)]
+ left_sfr2 = [(0, 3, icol - 1) for icol in range(ncol_split, 0, -1)]
+
+ right_sfr = [(0, 1, icol) for icol in range(ncol_split)]
+ right_sfr.append((0, 2, ncol_split - 1))
+ right_sfr.extend([(0, 3, icol - 1) for icol in range(ncol_split, 0, -1)])
+
+    # shift the right-hand reaches for the single model:
+ right_sfr_single = [(t[0], t[1], t[2] + ncol_split) for t in right_sfr]
+
+ package_data = []
+ if gwf_type == "single":
+ left_sfr1.extend(right_sfr_single)
+ left_sfr1.extend(left_sfr2)
+ package_data, conn_data = make_sfr_data(left_sfr1)
+ elif gwf_type == "left":
+ # these two are not connected
+ package_data, conn_data = make_sfr_data(left_sfr1)
+ package_data2, conn_data2 = make_sfr_data(
+ left_sfr2, ireach_offset=len(package_data)
+ )
+ package_data.extend(package_data2)
+ conn_data.extend(conn_data2)
+ elif gwf_type == "right":
+ package_data, conn_data = make_sfr_data(right_sfr)
+
+ nreaches = len(package_data)
+
+ if gwf_type in ("single", "left"):
+ period_data = [
+ (0, "INFLOW", 100.0),
+ ]
+ else:
+ period_data = None
+
+ if gwf_type != "single":
+ mover = True
+ else:
+ mover = None
+
+ sfr = flopy.mf6.modflow.ModflowGwfsfr(
+ gwf,
+ save_flows=True,
+ print_input=True,
+ print_flows=True,
+ print_stage=True,
+ mover=mover,
+ stage_filerecord=f"{gwf_type}.sfr.stg",
+ budget_filerecord=f"{gwf_type}.sfr.bud",
+ nreaches=nreaches,
+ packagedata=package_data,
+ connectiondata=conn_data,
+ perioddata=period_data,
+ pname=f"sfr_{gwf_type}",
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{gwf_type}.cbc",
+ head_filerecord=f"{gwf_type}.hds",
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ printrecord=[("BUDGET", "LAST")],
+ )
+
+ return gwf
+
+
+def build_exchanges(sim):
+ # add a gwf-gwf exchange
+ gwfgwf_data = [
+ (
+ (0, irow, ncol_split - 1),
+ (0, irow, 0),
+ 1,
+ delr / 2.0,
+ delr / 2.0,
+ delc,
+ 0.0,
+ delr,
+ )
+ for irow in range(nrow)
+ ]
+
+ # GWF-GWF
+ mvr_filerecord = "left-right.exg.mvr"
+ gwfgwf = flopy.mf6.ModflowGwfgwf(
+ sim,
+ exgtype="GWF6-GWF6",
+ nexg=len(gwfgwf_data),
+ exgmnamea="left",
+ exgmnameb="right",
+ exchangedata=gwfgwf_data,
+ auxiliary=["ANGLDEGX", "CDIST"],
+ dev_interfacemodel_on=False,
+ filename="left-right.exg",
+ )
+
+ # simulation GWF-GWF Mover
+ maxmvr, maxpackages = 2, 2
+ mvrpack_sim = [["left", "sfr_left"], ["right", "sfr_right"]]
+ mvrspd = [
+ # connect left to right
+ [
+ "left",
+ "sfr_left",
+ ncol_split - 1,
+ "right",
+ "sfr_right",
+ 0,
+ "FACTOR",
+ 1.00,
+ ],
+ # connect right to left
+ [
+ "right",
+ "sfr_right",
+ 2 * ncol_split,
+ "left",
+ "sfr_left",
+ ncol_split,
+ "FACTOR",
+ 1.00,
+ ],
+ ]
+
+ gwfgwf.mvr.initialize(
+ modelnames=True,
+ maxmvr=maxmvr,
+ print_flows=True,
+ maxpackages=maxpackages,
+ packages=mvrpack_sim,
+ perioddata=mvrspd,
+ filename=mvr_filerecord,
+ )
+
+
+def build_simulation(idx, sim_ws, sim_type="single"):
+ name = cases[idx]
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name,
+ sim_ws=sim_ws,
+ )
+
+ tdis = flopy.mf6.ModflowTdis(
+ sim,
+ time_units="DAYS",
+ nper=nper,
+ )
+
+ # Flow solver
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ complexity="simple",
+ print_option="ALL",
+ outer_dvclose=1e-6,
+ inner_dvclose=1e-6,
+ )
+
+ if sim_type == "single":
+ gwf_types = ("single",)
+ else:
+ gwf_types = ("left", "right")
+ for gwf_type in gwf_types:
+ gwf = build_gwf(sim, gwf_type=gwf_type)
+
+ if sim_type != "single":
+ build_exchanges(sim)
+
+ return sim
+
+
+def build_models(idx, test):
+ sim_ws = test.workspace / "mf6"
+ sim_base = build_simulation(idx, sim_ws)
+ sim = build_simulation(idx, test.workspace, sim_type="split")
+ return sim, sim_base
+
+
+def check_output(idx, test):
+    # base simulation's stage
+ ws = test.workspace
+ fpth = ws / "mf6/single.sfr.stg"
+ single_stage_obj = flopy.utils.HeadFile(fpth, text="STAGE")
+ single_stage = single_stage_obj.get_data().squeeze()
+
+ fpth = ws / "left.sfr.stg"
+ stage_obj = flopy.utils.HeadFile(fpth, text="STAGE")
+ v = stage_obj.get_data().squeeze()
+ assert np.allclose(
+ single_stage[0:ncol_split], v[0:ncol_split]
+ ), "sfr left (segment I) stages are not equal"
+ assert np.allclose(
+ single_stage[3 * ncol_split + 1 :], v[ncol_split:]
+ ), "sfr left (segment II) stages are not equal"
+
+ fpth = ws / "right.sfr.stg"
+ stage_obj = flopy.utils.HeadFile(fpth, text="STAGE")
+ v = stage_obj.get_data().squeeze()
+ assert np.allclose(
+ single_stage[ncol_split : 3 * ncol_split + 1], v
+ ), "sfr right stages are not equal"
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare=None,
+ )
+ test.run()
diff --git a/autotest/test_gwf_henry_nr.py b/autotest/test_gwf_henry_nr.py
index c072f3f2b58..a0d3f65a0fe 100644
--- a/autotest/test_gwf_henry_nr.py
+++ b/autotest/test_gwf_henry_nr.py
@@ -1,18 +1,19 @@
-# This is the Henry, Newton-Raphson problem described by Langevin et al (2020)
-# with a 20 by 40 grid instead of the 40 by 80 grid described in the paper.
-# There is freshwater inflow on the left and a sloping sea boundary on the
-# right with moves up and down according to a simple sine function. GHBs
-# and DRNs alternate and move up and down along the boundary to represent
-# the effects of tides on the aquifer.
+"""
+The Henry, Newton-Raphson problem described by Langevin et al (2020)
+with a 20x40 grid instead of the 40x80 grid described in the paper.
+There is freshwater inflow on the left. A sloping sea boundary on the
+right moves up and down according to a simple sine function. GHBs and
+DRNs alternate and move up and down along the boundary to represent
+the effects of tides on the aquifer.
+"""
import flopy
import numpy as np
import pytest
-from conftest import should_compare
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["gwf_henrynr01"]
+cases = ["gwf_henrynr01"]
# global model variables
nlay = 20
@@ -65,9 +66,9 @@ def sinfunc(a, b, c, d, x):
return a * np.sin(b * (x - c)) + d
-def build_model(idx, dir, exe):
- ws = dir
- name = ex[idx]
+def build_models(idx, test):
+ ws = test.workspace
+ name = cases[idx]
nrow = 1
delr = lx / ncol
@@ -89,7 +90,7 @@ def build_model(idx, dir, exe):
# build MODFLOW 6 files
sim = flopy.mf6.MFSimulation(
- sim_name=name, version="mf6", exe_name=exe, sim_ws=ws
+ sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
sim.name_file.continue_ = False
@@ -231,26 +232,16 @@ def build_model(idx, dir, exe):
return sim, None
-# - No need to change any code below
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
name = "gwf-henry-nr"
- comparisons = {name: ("6.2.1",)}
- mf6 = targets["mf6"]
- test = TestFramework()
- test.build(lambda i, w: build_model(i, w, mf6), idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- idxsim=idx,
- mf6_regression=True,
- cmp_verbose=False,
- make_comparison=should_compare(name, comparisons, targets),
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ compare="mf6_regression",
+ verbose=False,
)
+ test.run()
diff --git a/autotest/test_gwf_ifmod_buy.py b/autotest/test_gwf_ifmod_buy.py
index 428b025c230..0f0bfa5df4d 100644
--- a/autotest/test_gwf_ifmod_buy.py
+++ b/autotest/test_gwf_ifmod_buy.py
@@ -1,34 +1,36 @@
+"""
+General test for the interface model approach.
+It compares the result of a single reference model
+to the equivalent case where the domain is decomposed
+and joined by a GWF-GWF exchange.
+
+ 'refmodel' 'leftmodel' 'rightmodel'
+
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 VS 1 1 1 1 1 + 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+
+We assert equality on the head values and the (components of)
+specific discharges. All models are part of the same solution
+for convenience. Finally, the budget error is checked.
+"""
+
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-
-# General test for the interface model approach.
-# It compares the result of a single reference model
-# to the equivalent case where the domain is decomposed
-# and joined by a GWF-GWF exchange.
-#
-# 'refmodel' 'leftmodel' 'rightmodel'
-#
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 VS 1 1 1 1 1 + 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-#
-# We assert equality on the head values and the (components of)
-# specific discharges. All models are part of the same solution
-# for convenience. Finally, the budget error is checked.
-
-ex = ["ifmod_buy01"]
+
+cases = ["ifmod_buy01"]
# some global convenience...:
# model names
@@ -84,7 +86,7 @@
def get_model(idx, dir):
- name = ex[idx]
+ name = cases[idx]
# parameters and spd
# tdis
@@ -179,7 +181,6 @@ def get_model(idx, dir):
def add_refmodel(sim):
-
gwf = flopy.mf6.ModflowGwf(sim, modelname=mname_ref, save_flows=True)
dis = flopy.mf6.ModflowGwfdis(
@@ -231,7 +232,6 @@ def add_refmodel(sim):
def add_leftmodel(sim):
-
left_chd = [[(0, irow, 0), h_left] for irow in range(nrow)]
chd_spd_left = {0: left_chd}
@@ -273,7 +273,6 @@ def add_leftmodel(sim):
def add_rightmodel(sim):
-
right_chd = [[(0, irow, ncol_right - 1), h_right] for irow in range(nrow)]
chd_spd_right = {0: right_chd}
@@ -317,7 +316,6 @@ def add_rightmodel(sim):
def add_gwfexchange(sim):
-
angldegx = 0.0
cdist = delr
gwfgwf_data = [
@@ -346,7 +344,6 @@ def add_gwfexchange(sim):
def add_gwtrefmodel(sim):
-
gwt = flopy.mf6.ModflowGwt(sim, modelname=mname_gwtref)
dis = flopy.mf6.ModflowGwtdis(
@@ -393,7 +390,6 @@ def add_gwtrefmodel(sim):
def add_gwtleftmodel(sim):
-
gwt = flopy.mf6.ModflowGwt(sim, modelname=mname_gwtleft)
dis = flopy.mf6.ModflowGwtdis(
@@ -440,7 +436,6 @@ def add_gwtleftmodel(sim):
def add_gwtrightmodel(sim):
-
gwt = flopy.mf6.ModflowGwt(sim, modelname=mname_gwtright)
dis = flopy.mf6.ModflowGwtdis(
@@ -489,7 +484,6 @@ def add_gwtrightmodel(sim):
def add_gwtexchange(sim):
-
angldegx = 0.0
cdist = delr
gwtgwt_data = [
@@ -518,8 +512,8 @@ def add_gwtexchange(sim):
)
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
@@ -543,27 +537,27 @@ def qxqyqz(fname, nlay, nrow, ncol):
return qx, qy, qz
-def compare_to_ref(sim):
+def check_output(idx, test):
print("comparing heads and spec. discharge to single model reference...")
- fpth = os.path.join(sim.simpath, f"{mname_ref}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_ref}.hds")
hds = flopy.utils.HeadFile(fpth)
heads = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{mname_ref}.cbc")
+ fpth = os.path.join(test.workspace, f"{mname_ref}.cbc")
nlay, nrow, ncol = heads.shape
qxb, qyb, qzb = qxqyqz(fpth, nlay, nrow, ncol)
- fpth = os.path.join(sim.simpath, f"{mname_left}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_left}.hds")
hds = flopy.utils.HeadFile(fpth)
heads_left = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{mname_left}.cbc")
+ fpth = os.path.join(test.workspace, f"{mname_left}.cbc")
nlay, nrow, ncol = heads_left.shape
qxb_left, qyb_left, qzb_left = qxqyqz(fpth, nlay, nrow, ncol)
- fpth = os.path.join(sim.simpath, f"{mname_right}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_right}.hds")
hds = flopy.utils.HeadFile(fpth)
heads_right = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{mname_right}.cbc")
+ fpth = os.path.join(test.workspace, f"{mname_right}.cbc")
nlay, nrow, ncol = heads_right.shape
qxb_right, qyb_right, qzb_right = qxqyqz(fpth, nlay, nrow, ncol)
@@ -634,7 +628,7 @@ def compare_to_ref(sim):
# check budget error from .lst file
for mname in [mname_ref, mname_left, mname_right]:
- fpth = os.path.join(sim.simpath, f"{mname}.lst")
+ fpth = os.path.join(test.workspace, f"{mname}.lst")
for line in open(fpth):
if line.lstrip().startswith("PERCENT"):
cumul_balance_error = float(line.split()[3])
@@ -645,17 +639,14 @@ def compare_to_ref(sim):
)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
@pytest.mark.developmode
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=compare_to_ref, idxsim=idx
- ),
- str(function_tmpdir),
- )
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ )
+ test.run()
diff --git a/autotest/test_gwf_ifmod_idomain.py b/autotest/test_gwf_ifmod_idomain.py
index 4f89039dd14..8f5ac4c18d0 100644
--- a/autotest/test_gwf_ifmod_idomain.py
+++ b/autotest/test_gwf_ifmod_idomain.py
@@ -1,28 +1,30 @@
+"""
+General test for the interface model approach.
+It compares the result of a single reference model
+to the equivalent case where the domain is decomposed
+and joined by a GWF-GWF exchange.
+
+In this case we test the use of idomain at the interface
+
+ 'refmodel' 'leftmodel' 'rightmodel'
+
+ layer 1: 1 1 1 1 0 0 1 1 1 1 1 1 1 1 0 0 1 1 1 1
+ layer 2: 1 1 1 1 1 1 1 1 1 1 VS 1 1 1 1 1 + 1 1 1 1 1
+ layer 3: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+
+We assert equality on the head values. All models are part of a single
+solution for convenience. Finally, the budget error is checked.
+"""
+
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-
-# General test for the interface model approach.
-# It compares the result of a single reference model
-# to the equivalent case where the domain is decomposed
-# and joined by a GWF-GWF exchange.
-#
-# In this case we test the use of idomain at the interface
-#
-# 'refmodel' 'leftmodel' 'rightmodel'
-#
-# layer 1: 1 1 1 1 0 0 1 1 1 1 1 1 1 1 0 0 1 1 1 1
-# layer 2: 1 1 1 1 1 1 1 1 1 1 VS 1 1 1 1 1 + 1 1 1 1 1
-# layer 3: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-#
-# We assert equality on the head values. All models are part of a single
-# solution for convenience. Finally, the budget error is checked.
-
-ex = ["ifmod_ibound"]
+
+cases = ["ifmod_ibound"]
# some global convenience...:
# model names
@@ -73,7 +75,7 @@
h_right = 75.0
# initial head
-h_start = 0.0
+h_start = 0.0
# head boundaries
@@ -103,7 +105,7 @@
def get_model(idx, dir):
- name = ex[idx]
+ name = cases[idx]
# parameters and spd
# tdis
@@ -272,11 +274,7 @@ def add_rightmodel(sim):
)
ic = flopy.mf6.ModflowGwfic(gwf, strt=h_start)
npf = flopy.mf6.ModflowGwfnpf(
- gwf,
- save_specific_discharge=True,
- save_flows=True,
- icelltype=0,
- k=hk
+ gwf, save_specific_discharge=True, save_flows=True, icelltype=0, k=hk
)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd_right)
oc = flopy.mf6.ModflowGwfoc(
@@ -312,8 +310,8 @@ def add_gwfexchange(sim):
]
for ilay in range(nlay)
for irow in range(nrow)
- if idomain_left[ilay, irow, ncol_left - 1] > 0 and
- idomain_right[ilay, irow, 0] > 0
+ if idomain_left[ilay, irow, ncol_left - 1] > 0
+ and idomain_right[ilay, irow, 0] > 0
]
gwfgwf = flopy.mf6.ModflowGwfgwf(
sim,
@@ -327,31 +325,31 @@ def add_gwfexchange(sim):
)
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
-def compare_to_ref(sim):
+def check_output(idx, test):
print("comparing heads to single model reference...")
- fpth = os.path.join(sim.simpath, f"{mname_ref}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_ref}.hds")
hds = flopy.utils.HeadFile(fpth)
- fpth = os.path.join(sim.simpath, f"{mname_left}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_left}.hds")
hds_l = flopy.utils.HeadFile(fpth)
- fpth = os.path.join(sim.simpath, f"{mname_right}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_right}.hds")
hds_r = flopy.utils.HeadFile(fpth)
times = hds.get_times()
for t in times:
heads = hds.get_data(totim=t)
- heads_left = hds_l.get_data(totim=t)
+ heads_left = hds_l.get_data(totim=t)
heads_right = hds_r.get_data(totim=t)
heads_2models = np.append(heads_left, heads_right, axis=2)
# check idomain was used
- assert heads[0, 0, 4] == 1.0e+30, "idomain was set to 0 for this cell"
- assert heads[0, 0, 5] == 1.0e+30, "idomain was set to 0 for this cell"
+ assert heads[0, 0, 4] == 1.0e30, "idomain was set to 0 for this cell"
+ assert heads[0, 0, 5] == 1.0e30, "idomain was set to 0 for this cell"
# compare heads
maxdiff = np.amax(abs(heads - heads_2models))
@@ -364,7 +362,7 @@ def compare_to_ref(sim):
# check budget error from .lst file
for mname in [mname_ref, mname_left, mname_right]:
- fpth = os.path.join(sim.simpath, f"{mname}.lst")
+ fpth = os.path.join(test.workspace, f"{mname}.lst")
for line in open(fpth):
if line.lstrip().startswith("PERCENT"):
cumul_balance_error = float(line.split()[3])
@@ -375,17 +373,14 @@ def compare_to_ref(sim):
)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
@pytest.mark.developmode
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=compare_to_ref, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
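
The listing-file scan at the end of check_output recurs in several of the ifmod tests below; pulled out as a standalone helper it amounts to the sketch below (the tolerance value is an assumption, since the assert itself falls outside the hunks shown):

    def check_budget_error(lst_path, tol=1e-5):  # tol assumed for illustration
        # scan a MODFLOW 6 listing file for the cumulative percent discrepancy
        with open(lst_path) as f:
            for line in f:
                if line.lstrip().startswith("PERCENT"):
                    err = float(line.split()[3])
                    assert abs(err) < tol, f"budget error in {lst_path}: {err}"
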
diff --git a/autotest/test_gwf_ifmod_mult_exg.py b/autotest/test_gwf_ifmod_mult_exg.py
index dc179b804db..c5593f0b75e 100644
--- a/autotest/test_gwf_ifmod_mult_exg.py
+++ b/autotest/test_gwf_ifmod_mult_exg.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the interface model approach for multiple (2) exchanges between
the same two models. One exchange has XT3D and the other one doesn't.
@@ -19,18 +18,18 @@
will have the XT3D calculation enabled.
TODO: (how) will this affect accuracy?
-
"""
+
import os
import flopy
import numpy as np
import pytest
from flopy.utils.lgrutil import Lgr
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["ifmod_mult_exg"]
+cases = ["ifmod_mult_exg"]
name_parent = "parent"
name_child = "child"
g_delr = 10.0
@@ -41,7 +40,7 @@
def get_model(idx, dir):
- name = ex[idx]
+ name = cases[idx]
# parameters and spd
# tdis
@@ -256,25 +255,25 @@ def get_model(idx, dir):
return sim
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
-def eval_heads(sim):
- fpth = os.path.join(sim.simpath, f"{name_parent}.hds")
+def check_output(idx, test):
+ fpth = os.path.join(test.workspace, f"{name_parent}.hds")
hds = flopy.utils.HeadFile(fpth)
heads = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{name_child}.hds")
+ fpth = os.path.join(test.workspace, f"{name_child}.hds")
hds_c = flopy.utils.HeadFile(fpth)
heads_c = hds_c.get_data()
- fpth = os.path.join(sim.simpath, f"{name_parent}.dis.grb")
+ fpth = os.path.join(test.workspace, f"{name_parent}.dis.grb")
grb = flopy.mf6.utils.MfGrdFile(fpth)
mg = grb.modelgrid
- fpth = os.path.join(sim.simpath, f"{name_child}.dis.grb")
+ fpth = os.path.join(test.workspace, f"{name_child}.dis.grb")
grb_c = flopy.mf6.utils.MfGrdFile(fpth)
mg_c = grb_c.modelgrid
@@ -330,17 +329,14 @@ def exact(x):
# assert maxdiff_child_south > maxdiff_child_north
-@pytest.mark.parametrize(
- "name",
- ex,
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
@pytest.mark.developmode
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_heads, idxsim=0
- ),
- str(function_tmpdir),
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_ifmod_newton.py b/autotest/test_gwf_ifmod_newton.py
index abfc8e2a3c8..9ae844132e5 100644
--- a/autotest/test_gwf_ifmod_newton.py
+++ b/autotest/test_gwf_ifmod_newton.py
@@ -1,34 +1,36 @@
+"""
+General test for the interface model approach.
+It compares the result of a single reference model
+to the equivalent case where the domain is decomposed
+and joined by a GWF-GWF exchange.
+
+In this case we test newton option, which is also enabled in
+the interface model and should give identical results.
+
+period 1: In the first stress period we start almost dry and have the
+          model fill up.
+period 2: The BC on the left is lowered such that a part of the top
+ layer is drained.
+
+ 'refmodel' 'leftmodel' 'rightmodel'
+
+ layer 1: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
+ layer 2: 1 . . . . . . . 1 VS 1 . . . . 1 + 1 . . 1
+ layer 3: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
+
+We assert equality on the head values. All models are part of the same
+solution for convenience. Finally, the budget error is checked.
+"""
+
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-
-# General test for the interface model approach.
-# It compares the result of a single reference model
-# to the equivalent case where the domain is decomposed
-# and joined by a GWF-GWF exchange.
-#
-# In this case we test newton option, which is also enabled in
-# the interface model and should give identical results.
-#
-# period 1: The first stress period we start almost dry and have the
-# model fill up.
-# period 2: The BC on the left is lowered such that a part of the top
-# layer is drained.
-#
-# 'refmodel' 'leftmodel' 'rightmodel'
-#
-# layer 1: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
-# layer 2: 1 . . . . . . . 1 VS 1 . . . . 1 + 1 . . 1
-# layer 3: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
-#
-# We assert equality on the head values. All models are part of the same
-# solution for convenience. Finally, the budget error is checked.
-
-ex = ["ifmod_newton01"]
+
+cases = ["ifmod_newton01"]
# some global convenience...:
# model names
@@ -109,8 +111,8 @@
chd_spd_right[1] = rchd_right
-def get_model(idx, dir):
- name = ex[idx]
+def get_model(idx, ws):
+ name = cases[idx]
# parameters and spd
# tdis
@@ -123,7 +125,7 @@ def get_model(idx, dir):
hclose, rclose, relax = hclose_check, 1e-3, 0.97
sim = flopy.mf6.MFSimulation(
- sim_name=name, version="mf6", exe_name="mf6", sim_ws=dir
+ sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
tdis = flopy.mf6.ModflowTdis(
@@ -175,8 +177,9 @@ def add_refmodel(sim):
global chd_spd
global tops
- gwf = flopy.mf6.ModflowGwf(sim, modelname=mname_ref, newtonoptions="NEWTON",
- save_flows=True)
+ gwf = flopy.mf6.ModflowGwf(
+ sim, modelname=mname_ref, newtonoptions="NEWTON", save_flows=True
+ )
dis = flopy.mf6.ModflowGwfdis(
gwf,
@@ -226,8 +229,9 @@ def add_leftmodel(sim):
global h_left
global chd_spd_left
- gwf = flopy.mf6.ModflowGwf(sim, modelname=mname_left, newtonoptions="NEWTON",
- save_flows=True)
+ gwf = flopy.mf6.ModflowGwf(
+ sim, modelname=mname_left, newtonoptions="NEWTON", save_flows=True
+ )
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
@@ -268,8 +272,9 @@ def add_rightmodel(sim):
global shift_x, shift_y
global chd_spd_right
- gwf = flopy.mf6.ModflowGwf(sim, modelname=mname_right, newtonoptions="NEWTON",
- save_flows=True)
+ gwf = flopy.mf6.ModflowGwf(
+ sim, modelname=mname_right, newtonoptions="NEWTON", save_flows=True
+ )
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
@@ -336,19 +341,19 @@ def add_gwfexchange(sim):
)
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
-def compare_to_ref(sim):
+def check_output(idx, test):
print("comparing heads to single model reference...")
- fpth = os.path.join(sim.simpath, f"{mname_ref}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_ref}.hds")
hds = flopy.utils.HeadFile(fpth)
- fpth = os.path.join(sim.simpath, f"{mname_left}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_left}.hds")
hds_l = flopy.utils.HeadFile(fpth)
- fpth = os.path.join(sim.simpath, f"{mname_right}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_right}.hds")
hds_r = flopy.utils.HeadFile(fpth)
times = hds.get_times()
@@ -369,7 +374,7 @@ def compare_to_ref(sim):
# check budget error from .lst file
for mname in [mname_ref, mname_left, mname_right]:
- fpth = os.path.join(sim.simpath, f"{mname}.lst")
+ fpth = os.path.join(test.workspace, f"{mname}.lst")
for line in open(fpth):
if line.lstrip().startswith("PERCENT"):
cumul_balance_error = float(line.split()[3])
@@ -380,17 +385,14 @@ def compare_to_ref(sim):
)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
@pytest.mark.developmode
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=compare_to_ref, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_ifmod_rewet.py b/autotest/test_gwf_ifmod_rewet.py
index feb98489108..42562768c17 100644
--- a/autotest/test_gwf_ifmod_rewet.py
+++ b/autotest/test_gwf_ifmod_rewet.py
@@ -1,36 +1,38 @@
+"""
+General test for the interface model approach.
+It compares the result of a single reference model
+to the equivalent case where the domain is decomposed
+and joined by a GWF-GWF exchange.
+
+In this case we test rewetting, which is also enabled in
+the interface model and should give identical results.
+
+period 1: In the first stress period we start almost dry and have the
+          model fill up.
+period 2: The BC on the left is lowered such that a part of the top
+ layer dries. To test the interface, the value is chosen such
+ that the boundary cell on the left is DRY and the one on the
+ right isn't.
+
+ 'refmodel' 'leftmodel' 'rightmodel'
+
+ layer 1: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
+ layer 2: 1 . . . . . . . 1 VS 1 . . . . 1 + 1 . . 1
+ layer 3: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
+
+We assert equality on the head values. All models are part of the same
+solution for convenience. Finally, the budget error is checked.
+"""
+
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-
-# General test for the interface model approach.
-# It compares the result of a single reference model
-# to the equivalent case where the domain is decomposed
-# and joined by a GWF-GWF exchange.
-#
-# In this case we test rewetting, which is also enabled in
-# the interface model and should give identical results.
-#
-# period 1: The first stress period we start almost dry and have the
-# model fill up.
-# period 2: The BC on the left is lowered such that a part of the top
-# layer dries. To test the interface, the value is chosen such
-# that the boundary cell on the left is DRY and the one on the
-# right isn't.
-#
-# 'refmodel' 'leftmodel' 'rightmodel'
-#
-# layer 1: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
-# layer 2: 1 . . . . . . . 1 VS 1 . . . . 1 + 1 . . 1
-# layer 3: 1 . . . . . . . 1 1 . . . . 1 1 . . 1
-#
-# We assert equality on the head values. All models are part of the same
-# solution for convenience. Finally, the budget error is checked.
-
-ex = ["ifmod_rewet01"]
+
+cases = ["ifmod_rewet01"]
# some global convenience...:
# model names
@@ -116,7 +118,7 @@
def get_model(idx, dir):
- name = ex[idx]
+ name = cases[idx]
# parameters and spd
# tdis
@@ -348,25 +350,25 @@ def add_gwfexchange(sim):
)
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
-def compare_to_ref(sim):
+def check_output(idx, test):
print("comparing heads to single model reference...")
- fpth = os.path.join(sim.simpath, f"{mname_ref}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_ref}.hds")
hds = flopy.utils.HeadFile(fpth)
- fpth = os.path.join(sim.simpath, f"{mname_left}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_left}.hds")
hds_l = flopy.utils.HeadFile(fpth)
- fpth = os.path.join(sim.simpath, f"{mname_right}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_right}.hds")
hds_r = flopy.utils.HeadFile(fpth)
times = hds.get_times()
- for iper, t in enumerate(times):
+ for iper, t in enumerate(times):
heads = hds.get_data(totim=t)
- heads_left = hds_l.get_data(totim=t)
+ heads_left = hds_l.get_data(totim=t)
heads_right = hds_r.get_data(totim=t)
heads_2models = np.append(heads_left, heads_right, axis=2)
@@ -374,9 +376,13 @@ def compare_to_ref(sim):
# dry in period 2, but the cells in the right model should remain
# active. This tests the interface model for dealing with drying
# and wetting, and handling inactive cells, explicitly
- if (iper == 1):
- assert np.all(heads_left[0,0,:] == -1.0e+30), "left model, top layer should be DRY in period 2"
- assert np.all(heads_right[0,0,:] > -1.0e+30), "right model, top layer should be WET in period 2"
+ if iper == 1:
+ assert np.all(
+ heads_left[0, 0, :] == -1.0e30
+ ), "left model, top layer should be DRY in period 2"
+ assert np.all(
+ heads_right[0, 0, :] > -1.0e30
+ ), "right model, top layer should be WET in period 2"
# compare heads
maxdiff = np.amax(abs(heads - heads_2models))
@@ -389,7 +395,7 @@ def compare_to_ref(sim):
# check budget error from .lst file
for mname in [mname_ref, mname_left, mname_right]:
- fpth = os.path.join(sim.simpath, f"{mname}.lst")
+ fpth = os.path.join(test.workspace, f"{mname}.lst")
for line in open(fpth):
if line.lstrip().startswith("PERCENT"):
cumul_balance_error = float(line.split()[3])
@@ -400,17 +406,14 @@ def compare_to_ref(sim):
)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
@pytest.mark.developmode
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=compare_to_ref, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_ifmod_vert.py b/autotest/test_gwf_ifmod_vert.py
index 5e5240726a7..cf480fa2f9a 100644
--- a/autotest/test_gwf_ifmod_vert.py
+++ b/autotest/test_gwf_ifmod_vert.py
@@ -28,18 +28,18 @@
the child model should match the theory. In this case we
just assert that they are equal for each column, something
that is clearly not true when simulating without XT3D.
-
"""
+
import os
import flopy
import numpy as np
import pytest
from flopy.utils.lgrutil import Lgr
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["ifmod_vert"]
+cases = ["ifmod_vert"]
parent_name = "parent"
child_name = "child"
@@ -55,7 +55,7 @@ def get_model(idx, dir):
global child_domain
global hclose
- name = ex[idx]
+ name = cases[idx]
# tdis period data
nper = 1
@@ -234,19 +234,19 @@ def get_model(idx, dir):
return sim
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
-def eval_heads(sim):
+def check_output(idx, test):
print("comparing heads for child model to analytical result...")
- fpth = os.path.join(sim.simpath, f"{child_name}.hds")
+ fpth = os.path.join(test.workspace, f"{child_name}.hds")
hds_c = flopy.utils.HeadFile(fpth)
heads_c = hds_c.get_data()
- fpth = os.path.join(sim.simpath, f"{child_name}.dis.grb")
+ fpth = os.path.join(test.workspace, f"{child_name}.dis.grb")
grb_c = flopy.mf6.utils.MfGrdFile(fpth)
# (note that without XT3D on the exchange, the 'error'
@@ -262,11 +262,11 @@ def eval_heads(sim):
for mname in [parent_name, child_name]:
print(f"Checking flowja residual for model {mname}")
- fpth = os.path.join(sim.simpath, f"{mname}.dis.grb")
+ fpth = os.path.join(test.workspace, f"{mname}.dis.grb")
grb = flopy.mf6.utils.MfGrdFile(fpth)
ia = grb._datadict["IA"] - 1
- fpth = os.path.join(sim.simpath, f"{mname}.cbc")
+ fpth = os.path.join(test.workspace, f"{mname}.cbc")
assert os.path.isfile(fpth)
cbb = flopy.utils.CellBudgetFile(fpth, precision="double")
flow_ja_face = cbb.get_data(idx=0)
@@ -281,17 +281,14 @@ def eval_heads(sim):
assert np.allclose(res, 0.0, atol=1.0e-6), errmsg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
@pytest.mark.developmode
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_heads, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_ifmod_xt3d01.py b/autotest/test_gwf_ifmod_xt3d01.py
index bcff3b72dc8..2da24e89601 100644
--- a/autotest/test_gwf_ifmod_xt3d01.py
+++ b/autotest/test_gwf_ifmod_xt3d01.py
@@ -1,37 +1,39 @@
+"""
+Test the interface model approach, when running
+with a GWF-GWF exchange and XT3D applied on it.
+It compares the result for a simple LGR configuration
+to the analytical values:
+
+ 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1
+ 1 1 0 0 0 1 1
+(H=1.0) 1 1 0 0 0 1 1 (H=0.0)
+ 1 1 0 0 0 1 1
+ 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1
+
+with the region with ibound == 0 being simulated on a refined, 9x9 grid.
+
+This is also the first test problem presented in
+the MODFLOW-USG manual: 'test006_2models'
+
+When running without XT3D, the results will disagree
+with theory because the CVFD requirements are violated at the
+LGR interface. We compare heads, specific discharge, and
+confirm that there is no budget error.
+"""
+
import os
import flopy
import numpy as np
import pytest
from flopy.utils.lgrutil import Lgr
+
from framework import TestFramework
-from simulation import TestSimulation
-
-# Test for the interface model approach, when running
-# with a GWF-GWF exchange and XT3D applied on it.
-# It compares the result for a simple LGR configuration
-# to the analytical values:
-#
-# 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1
-# 1 1 0 0 0 1 1
-# (H=1.0) 1 1 0 0 0 1 1 (H=0.0)
-# 1 1 0 0 0 1 1
-# 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1
-#
-# with the region with ibound == 0 being simulated on the
-# a refined, 9x9 grid.
-#
-# This is also the first test problem presented in
-# the MODFLOW-USG manual: 'test006_2models'
-#
-# When running without XT3D, the results will disagree
-# with theory because the CVFD requirements are violated at the
-# at the LGR interface. We compare heads, specific discharge, and
-# confirm that there is no budget error.
-
-ex = ["ifmod_xt3d01"]
+
+cases = ["ifmod_xt3d01"]
# globally for convenience...
useXT3D = True
@@ -51,7 +53,7 @@ def get_model(idx, dir):
global child_domain
global hclose
- name = ex[idx]
+ name = cases[idx]
# tdis period data
nper = 1
@@ -274,8 +276,8 @@ def get_model(idx, dir):
return sim
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
@@ -299,30 +301,30 @@ def qxqyqz(fname, nlay, nrow, ncol):
return qx, qy, qz
-def eval_heads(sim):
+def check_output(idx, test):
print("comparing heads and spec. discharges to analytical result...")
- fpth = os.path.join(sim.simpath, f"{parent_name}.hds")
+ fpth = os.path.join(test.workspace, f"{parent_name}.hds")
hds = flopy.utils.HeadFile(fpth)
heads = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{parent_name}.cbc")
+ fpth = os.path.join(test.workspace, f"{parent_name}.cbc")
nlay, nrow, ncol = heads.shape
qxb, qyb, qzb = qxqyqz(fpth, nlay, nrow, ncol)
- fpth = os.path.join(sim.simpath, f"{child_name}.hds")
+ fpth = os.path.join(test.workspace, f"{child_name}.hds")
hds_c = flopy.utils.HeadFile(fpth)
heads_c = hds_c.get_data()
- fpth = os.path.join(sim.simpath, f"{child_name}.cbc")
+ fpth = os.path.join(test.workspace, f"{child_name}.cbc")
nlay, nrow, ncol = heads_c.shape
qxb_c, qyb_c, qzb_c = qxqyqz(fpth, nlay, nrow, ncol)
- fpth = os.path.join(sim.simpath, f"{parent_name}.dis.grb")
+ fpth = os.path.join(test.workspace, f"{parent_name}.dis.grb")
grb = flopy.mf6.utils.MfGrdFile(fpth)
mg = grb.modelgrid
- fpth = os.path.join(sim.simpath, f"{child_name}.dis.grb")
+ fpth = os.path.join(test.workspace, f"{child_name}.dis.grb")
grb_c = flopy.mf6.utils.MfGrdFile(fpth)
mg_c = grb_c.modelgrid
@@ -406,7 +408,7 @@ def exact(x):
# todo: mflistbudget
# check cumulative balance error from .lst file
for mname in [parent_name, child_name]:
- fpth = os.path.join(sim.simpath, f"{mname}.lst")
+ fpth = os.path.join(test.workspace, f"{mname}.lst")
for line in open(fpth):
if line.lstrip().startswith("PERCENT"):
cumul_balance_error = float(line.split()[3])
@@ -419,7 +421,7 @@ def exact(x):
# Check on residual, which is stored in diagonal position of
# flow-ja-face. Residual should be less than convergence tolerance,
# or this means the residual term is not added correctly.
- fpth = os.path.join(sim.simpath, f"{parent_name}.cbc")
+ fpth = os.path.join(test.workspace, f"{parent_name}.cbc")
cbb = flopy.utils.CellBudgetFile(fpth)
flow_ja_face = cbb.get_data(idx=0)
assert (
@@ -433,14 +435,14 @@ def exact(x):
assert np.allclose(res, 0.0, atol=1.0e-6), errmsg
# Read gwf-gwf observations values
- fpth = os.path.join(sim.simpath, "gwf_obs.csv")
+ fpth = os.path.join(test.workspace, "gwf_obs.csv")
with open(fpth) as f:
lines = f.readlines()
obsnames = [name for name in lines[0].strip().split(",")[1:]]
obsvalues = [float(v) for v in lines[1].strip().split(",")[1:]]
# Extract the gwf-gwf flows stored in parent budget file
- fpth = os.path.join(sim.simpath, f"{parent_name}.cbc")
+ fpth = os.path.join(test.workspace, f"{parent_name}.cbc")
cbb = flopy.utils.CellBudgetFile(fpth, precision="double")
parent_exchange_flows = cbb.get_data(
kstpkper=(0, 0), text="FLOW-JA-FACE", paknam="GWF-GWF_1"
@@ -448,7 +450,7 @@ def exact(x):
parent_exchange_flows = parent_exchange_flows["q"]
# Extract the gwf-gwf flows stored in child budget file
- fpth = os.path.join(sim.simpath, f"{child_name}.cbc")
+ fpth = os.path.join(test.workspace, f"{child_name}.cbc")
cbb = flopy.utils.CellBudgetFile(fpth, precision="double")
child_exchange_flows = cbb.get_data(
kstpkper=(0, 0), text="FLOW-JA-FACE", paknam="GWF-GWF_1"
@@ -464,7 +466,7 @@ def exact(x):
), "exchange observations do not match child exchange flows"
# Read the lumped boundname observations values
- fpth = os.path.join(sim.simpath, "gwf_obs_boundnames.csv")
+ fpth = os.path.join(test.workspace, "gwf_obs_boundnames.csv")
with open(fpth) as f:
lines = f.readlines()
obsnames = [name for name in lines[0].strip().split(",")[1:]]
@@ -474,16 +476,13 @@ def exact(x):
), "boundname observations do not match expected results"
-@pytest.mark.parametrize(
- "name",
- ex,
-)
-def test_mf6model(name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, 0, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_heads, idxsim=0
- ),
- str(function_tmpdir),
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
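The same refactor pattern recurs throughout the autotests below: a module-level "cases" list, a build_models(idx, test) that builds the flopy simulation in test.workspace (plus an optional comparison simulation), a check_output(idx, test) that reads results back from the workspace, and a parametrized test_mf6model that wires them into TestFramework. A minimal sketch of that shape, using only the keyword arguments that appear in these diffs (the case name and function bodies are placeholders):

    import pytest
    from framework import TestFramework

    cases = ["example_case"]  # placeholder case name


    def build_models(idx, test):
        sim = ...  # build a flopy MFSimulation with sim_ws=test.workspace
        return sim, None  # second item is an optional comparison simulation


    def check_output(idx, test):
        ...  # read heads/budgets from test.workspace and assert on them


    @pytest.mark.parametrize("idx, name", enumerate(cases))
    def test_mf6model(idx, name, function_tmpdir, targets):
        test = TestFramework(
            name=name,
            workspace=function_tmpdir,
            targets=targets,
            build=lambda t: build_models(idx, t),
            check=lambda t: check_output(idx, t),
        )
        test.run()
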
diff --git a/autotest/test_gwf_ifmod_xt3d02.py b/autotest/test_gwf_ifmod_xt3d02.py
index b95c43b931b..e4876486ae0 100644
--- a/autotest/test_gwf_ifmod_xt3d02.py
+++ b/autotest/test_gwf_ifmod_xt3d02.py
@@ -1,49 +1,49 @@
+"""
+Test the interface model approach.
+It compares the result of a single, strongly anisotropic model
+with XT3D enabled to the equivalent case where the domain is
+decomposed and joined by a GWF-GWF exchange with XT3D applied.
+
+ 'refmodel' 'leftmodel' 'rightmodel'
+
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 VS 1 1 1 1 1 + 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+
+The head values should always be identical. All models are
+part of the same solution for convenience.
+In addition, a check on the x,y,z components of specific discharge
+is present. The values of the left submodel are compared to
+the left part of the full model, and similarly for the right: they
+should be identical. Finally, the budget error is checked.
+"""
+
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-
-# Test for the interface model approach.
-# It compares the result of a single, strongly anisotropic model
-# with XT3D enabled to the equivalent case where the domain is
-# decomposed and joined by a GWF-GWF exchange with XT3D applied.
-#
-# 'refmodel' 'leftmodel' 'rightmodel'
-#
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 VS 1 1 1 1 1 + 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-#
-# The head values should always be indentical. All models are
-# part of the same solution for convenience.
-# In addition, a check on the x,y,z components of specific discharge
-# is present. The values of the left submodel are compared to
-# the left part of the full model, and similar for right: they
-# should be identical. Finally, the budget error is checked.
-
-ex = ["ifmod_xt3d02"]
-# global convenience...
+
+cases = ["ifmod_xt3d02"]
mname_ref = "refmodel"
mname_left = "leftmodel"
mname_right = "rightmodel"
hclose_check = 1e-9
max_inner_it = 300
-
useXT3D = True
def get_model(idx, dir):
- name = ex[idx]
+ name = cases[idx]
# parameters and spd
# tdis
@@ -284,8 +284,8 @@ def get_model(idx, dir):
return sim
-def build_model(idx, exdir):
- sim = get_model(idx, exdir)
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
return sim, None
@@ -309,27 +309,27 @@ def qxqyqz(fname, nlay, nrow, ncol):
return qx, qy, qz
-def compare_to_ref(sim):
+def check_output(idx, test):
print("comparing heads and spec. discharge to single model reference...")
- fpth = os.path.join(sim.simpath, f"{mname_ref}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_ref}.hds")
hds = flopy.utils.HeadFile(fpth)
heads = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{mname_ref}.cbc")
+ fpth = os.path.join(test.workspace, f"{mname_ref}.cbc")
nlay, nrow, ncol = heads.shape
qxb, qyb, qzb = qxqyqz(fpth, nlay, nrow, ncol)
- fpth = os.path.join(sim.simpath, f"{mname_left}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_left}.hds")
hds = flopy.utils.HeadFile(fpth)
heads_left = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{mname_left}.cbc")
+ fpth = os.path.join(test.workspace, f"{mname_left}.cbc")
nlay, nrow, ncol = heads_left.shape
qxb_left, qyb_left, qzb_left = qxqyqz(fpth, nlay, nrow, ncol)
- fpth = os.path.join(sim.simpath, f"{mname_right}.hds")
+ fpth = os.path.join(test.workspace, f"{mname_right}.hds")
hds = flopy.utils.HeadFile(fpth)
heads_right = hds.get_data()
- fpth = os.path.join(sim.simpath, f"{mname_right}.cbc")
+ fpth = os.path.join(test.workspace, f"{mname_right}.cbc")
nlay, nrow, ncol = heads_right.shape
qxb_right, qyb_right, qzb_right = qxqyqz(fpth, nlay, nrow, ncol)
@@ -400,7 +400,7 @@ def compare_to_ref(sim):
# check budget error from .lst file
for mname in [mname_ref, mname_left, mname_right]:
- fpth = os.path.join(sim.simpath, f"{mname}.lst")
+ fpth = os.path.join(test.workspace, f"{mname}.lst")
for line in open(fpth):
if line.lstrip().startswith("PERCENT"):
cumul_balance_error = float(line.split()[3])
@@ -411,16 +411,13 @@ def compare_to_ref(sim):
)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=compare_to_ref, idxsim=idx
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_ifmod_xt3d03.py b/autotest/test_gwf_ifmod_xt3d03.py
new file mode 100644
index 00000000000..e942d03d393
--- /dev/null
+++ b/autotest/test_gwf_ifmod_xt3d03.py
@@ -0,0 +1,446 @@
+"""
+Test the interface model approach.
+It compares the result of a single, strongly anisotropic model
+with XT3D enabled to the equivalent case where the domain is
+decomposed into 4 models connected with GWF-GWF exchanges all
+having XT3D enabled. Note the location of the well W, in the
+bottom right corner of model "tl" (and also in "ref" of course)
+
+ 'ref' 'tl' 'tr'
+
+ 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 + 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 W 1 1 1 1 1
+ 1 1 1 1 W 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 VS + +
+ 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 + 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1
+ 1 1 1 1 1 1 1 1 1 1
+
+ 'bl' 'br'
+
+The head values should always be identical. All models are
+part of the same solution for convenience.
+In addition, a check on the x,y,z components of specific discharge
+is present. Finally, the budget error is checked.
+"""
+
+import os
+from types import SimpleNamespace
+
+import flopy
+import numpy as np
+import pytest
+
+from framework import TestFramework
+
+cases = ["ifmod_xt3d03"]
+
+hclose_check = 1e-9
+max_inner_it = 300
+useXT3D = True
+
+# model spatial discretization
+nlay = 1
+ncol = 10
+ncol_split = 5
+nrow = 10
+nrow_split = 5
+
+# cell spacing
+delr = 10.0
+delc = 10.0
+area = delr * delc
+
+# shift (hor. and vert.)
+shift_some_x = -20 * delr # avoids overlap
+shift_x = 5 * delr
+shift_y = 5 * delc
+
+# top/bot of the aquifer
+tops = [0.0, -5.0]
+
+# hydraulic conductivity
+k11 = 10.0
+k22 = 0.1
+k_angle = 45.0
+
+# boundary stress period data
+h_left = -2.0
+h_right = -2.0
+
+# initial head
+h_start = -2.0
+
+# well
+well_id = (0, 4, 4)
+well_rate = -1.0
+
+
+def get_model(idx, dir):
+ name = cases[idx]
+
+ # parameters and spd
+ # tdis
+ nper = 1
+ tdis_rc = []
+ for i in range(nper):
+ tdis_rc.append((1.0, 1, 1))
+
+ # solver data
+ nouter, ninner = 100, max_inner_it
+ hclose, rclose, relax = hclose_check, 1e-3, 0.97
+
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name, version="mf6", exe_name="mf6", sim_ws=dir
+ )
+
+ tdis = flopy.mf6.ModflowTdis(
+ sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
+ )
+
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ print_option="SUMMARY",
+ outer_dvclose=hclose,
+ outer_maximum=nouter,
+ under_relaxation="DBD",
+ inner_maximum=ninner,
+ inner_dvclose=hclose,
+ rcloserecord=rclose,
+ linear_acceleration="BICGSTAB",
+ relaxation_factor=relax,
+ )
+
+ # reference model
+ dis_params = SimpleNamespace(
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ xorigin=shift_some_x,
+ yorigin=0.0,
+ tops=tops,
+ )
+ create_gwf_model(sim, "ref", dis_params)
+
+ # top-left model
+ dis_params.nrow = nrow_split
+ dis_params.ncol = ncol_split
+ dis_params.xorigin = 0.0
+ dis_params.yorigin = shift_y
+ create_gwf_model(sim, "tl", dis_params)
+
+ # bottom-left model
+ dis_params.xorigin = 0.0
+ dis_params.yorigin = 0.0
+ create_gwf_model(sim, "bl", dis_params)
+
+ # top-right model
+ dis_params.xorigin = shift_x
+ dis_params.yorigin = shift_y
+ create_gwf_model(sim, "tr", dis_params)
+
+ # bottom-right model
+ dis_params.xorigin = shift_x
+ dis_params.yorigin = 0.0
+ create_gwf_model(sim, "br", dis_params)
+
+ # two types of exchange data:
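+    # each exchangedata entry is: cellidm1, cellidm2, ihc, cl1, cl2, hwva,
+    # followed by the ANGLDEGX and CDIST auxiliary values declared on the
+    # exchanges below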
+ # tl-tr, bl-br (0 deg)
+ lr_data = [
+ [
+ (0, irow, ncol_split - 1),
+ (0, irow, 0),
+ 1,
+ delr / 2.0,
+ delr / 2.0,
+ delc,
+ 0.0,
+ delr,
+ ]
+ for irow in range(nrow_split)
+ ]
+ # tl-bl, tr-br (270 deg)
+ tb_data = [
+ [
+ (0, nrow_split - 1, icol),
+ (0, 0, icol),
+ 1,
+ delc / 2.0,
+ delc / 2.0,
+ delr,
+ 270.0,
+ delc,
+ ]
+ for icol in range(ncol_split)
+ ]
+
+ # set up 4 exchanges
+ # tl-tr
+ gwfgwf = flopy.mf6.ModflowGwfgwf(
+ sim,
+ exgtype="GWF6-GWF6",
+ nexg=len(lr_data),
+ exgmnamea="tl",
+ exgmnameb="tr",
+ exchangedata=lr_data,
+ auxiliary=["ANGLDEGX", "CDIST"],
+ xt3d=useXT3D,
+ filename="tltr.exg",
+ )
+
+ # bl-br
+ gwfgwf = flopy.mf6.ModflowGwfgwf(
+ sim,
+ exgtype="GWF6-GWF6",
+ nexg=len(lr_data),
+ exgmnamea="bl",
+ exgmnameb="br",
+ exchangedata=lr_data,
+ auxiliary=["ANGLDEGX", "CDIST"],
+ xt3d=useXT3D,
+ filename="blbr.exg",
+ )
+
+ # tl-bl
+ gwfgwf = flopy.mf6.ModflowGwfgwf(
+ sim,
+ exgtype="GWF6-GWF6",
+ nexg=len(tb_data),
+ exgmnamea="tl",
+ exgmnameb="bl",
+ exchangedata=tb_data,
+ auxiliary=["ANGLDEGX", "CDIST"],
+ xt3d=useXT3D,
+ filename="tlbl.exg",
+ )
+
+ # tr-br
+ gwfgwf = flopy.mf6.ModflowGwfgwf(
+ sim,
+ exgtype="GWF6-GWF6",
+ nexg=len(tb_data),
+ exgmnamea="tr",
+ exgmnameb="br",
+ exchangedata=tb_data,
+ auxiliary=["ANGLDEGX", "CDIST"],
+ xt3d=useXT3D,
+ filename="trbr.exg",
+ )
+
+ return sim
+
+
+def create_gwf_model(sim, mname, dis_params):
+ gwf = flopy.mf6.ModflowGwf(sim, modelname=mname, save_flows=True)
+
+    dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=dis_params.nlay,
+ nrow=dis_params.nrow,
+ ncol=dis_params.ncol,
+ delr=dis_params.delr,
+ delc=dis_params.delc,
+ xorigin=dis_params.xorigin,
+ yorigin=dis_params.yorigin,
+ top=dis_params.tops[0],
+ botm=dis_params.tops[1:],
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=h_start)
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_specific_discharge=True,
+ xt3doptions=useXT3D,
+ save_flows=True,
+ icelltype=0,
+ k=k11,
+ k22=k22,
+ angle1=k_angle,
+ )
+
+ # chd file
+ left_chd = []
+ right_chd = []
+ if mname == "ref":
+ left_chd = [[(0, irow, 0), h_left] for irow in range(nrow)]
+ right_chd = [[(0, irow, ncol - 1), h_right] for irow in range(nrow)]
+ elif mname == "tl" or mname == "bl":
+ left_chd = [[(0, irow, 0), h_left] for irow in range(nrow_split)]
+ right_chd = []
+ elif mname == "tr" or mname == "br":
+ left_chd = []
+ right_chd = [
+ [(0, irow, ncol_split - 1), h_right] for irow in range(nrow_split)
+ ]
+ chd_data = left_chd + right_chd
+ chd_spd = {0: chd_data}
+ chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd)
+
+ # well in top-left corner
+ if mname == "ref" or mname == "tl":
+ wel1 = flopy.mf6.ModflowGwfwel(
+ gwf,
+ stress_period_data=[[well_id, well_rate]],
+ print_input=True,
+ print_flows=True,
+ save_flows=False,
+ pname="WEL-1",
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ head_filerecord=f"{mname}.hds",
+ budget_filerecord=f"{mname}.cbc",
+ headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
+ saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
+ )
+
+
+def build_models(idx, test):
+ sim = get_model(idx, test.workspace)
+ return sim, None
+
+
+def qxqyqz(fname, nlay, nrow, ncol):
+ nodes = nlay * nrow * ncol
+ cbb = flopy.utils.CellBudgetFile(fname, precision="double")
+ spdis = cbb.get_data(text="DATA-SPDIS")[0]
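+    # scatter the node-indexed SPDIS records into full grid arrays and mask
+    # the 1.0e30 fill value so unset cells drop out of the comparisons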
+ qx = np.ones((nodes), dtype=float) * 1.0e30
+ qy = np.ones((nodes), dtype=float) * 1.0e30
+ qz = np.ones((nodes), dtype=float) * 1.0e30
+ n0 = spdis["node"] - 1
+ qx[n0] = spdis["qx"]
+ qy[n0] = spdis["qy"]
+ qz[n0] = spdis["qz"]
+ qx = qx.reshape(nlay, nrow, ncol)
+ qy = qy.reshape(nlay, nrow, ncol)
+ qz = qz.reshape(nlay, nrow, ncol)
+ qx = np.ma.masked_equal(qx, 1.0e30)
+ qy = np.ma.masked_equal(qy, 1.0e30)
+ qz = np.ma.masked_equal(qz, 1.0e30)
+ return qx, qy, qz
+
+
+def check_output(idx, test):
+ fpth = os.path.join(test.workspace, f"ref.hds")
+ hds = flopy.utils.HeadFile(fpth)
+ heads = hds.get_data()
+ fpth = os.path.join(test.workspace, f"ref.cbc")
+ nlay, nrow, ncol = heads.shape
+ qx, qy, qz = qxqyqz(fpth, nlay, nrow, ncol)
+
+ fpth = os.path.join(test.workspace, f"tl.hds")
+ hds = flopy.utils.HeadFile(fpth)
+ heads_tl = hds.get_data()
+ fpth = os.path.join(test.workspace, f"tl.cbc")
+ nlay, nrow, ncol = heads_tl.shape
+ qx_tl, qy_tl, qz_tl = qxqyqz(fpth, nlay, nrow, ncol)
+
+ fpth = os.path.join(test.workspace, f"tr.hds")
+ hds = flopy.utils.HeadFile(fpth)
+ heads_tr = hds.get_data()
+ fpth = os.path.join(test.workspace, f"tr.cbc")
+ nlay, nrow, ncol = heads_tr.shape
+ qx_tr, qy_tr, qz_tr = qxqyqz(fpth, nlay, nrow, ncol)
+
+ fpth = os.path.join(test.workspace, f"bl.hds")
+ hds = flopy.utils.HeadFile(fpth)
+ heads_bl = hds.get_data()
+ fpth = os.path.join(test.workspace, f"bl.cbc")
+ nlay, nrow, ncol = heads_bl.shape
+ qx_bl, qy_bl, qz_bl = qxqyqz(fpth, nlay, nrow, ncol)
+
+ fpth = os.path.join(test.workspace, f"br.hds")
+ hds = flopy.utils.HeadFile(fpth)
+ heads_br = hds.get_data()
+ fpth = os.path.join(test.workspace, f"br.cbc")
+ nlay, nrow, ncol = heads_br.shape
+ qx_br, qy_br, qz_br = qxqyqz(fpth, nlay, nrow, ncol)
+
+ heads_top = np.append(heads_tl[0], heads_tr[0], axis=1)
+ heads_bot = np.append(heads_bl[0], heads_br[0], axis=1)
+ heads_merged = np.append(heads_top, heads_bot, axis=0)
+
+ # compare heads
+ maxdiff = np.amax(abs(heads - heads_merged))
+ assert (
+ maxdiff < 10 * hclose_check
+ ), "Max. head diff. {} should \
+ be within solver tolerance (x10): {}".format(
+ maxdiff, 10 * hclose_check
+ )
+
+ # compare spdis-x
+ qx_top = np.append(qx_tl[0], qx_tr[0], axis=1)
+ qx_bot = np.append(qx_bl[0], qx_br[0], axis=1)
+ qx_merged = np.append(qx_top, qx_bot, axis=0)
+
+ maxdiff = np.amax(abs(qx - qx_merged))
+ assert (
+ maxdiff < 10 * hclose_check
+ ), "Max. diff. in spec. discharge (x) {} \
+ should be within solver tolerance (x10): {}".format(
+ maxdiff, 10 * hclose_check
+ )
+
+ # compare spdis-y
+ qy_top = np.append(qy_tl[0], qy_tr[0], axis=1)
+ qy_bot = np.append(qy_bl[0], qy_br[0], axis=1)
+ qy_merged = np.append(qy_top, qy_bot, axis=0)
+
+ maxdiff = np.amax(abs(qy - qy_merged))
+ assert (
+ maxdiff < 10 * hclose_check
+ ), "Max. diff. in spec. discharge (y) {} \
+ should be within solver tolerance (x10): {}".format(
+ maxdiff, 10 * hclose_check
+ )
+
+ # compare spdis-z
+ qz_top = np.append(qz_tl[0], qz_tr[0], axis=1)
+ qz_bot = np.append(qz_bl[0], qz_br[0], axis=1)
+ qz_merged = np.append(qz_top, qz_bot, axis=0)
+
+ maxdiff = np.amax(abs(qz - qz_merged))
+ assert (
+ maxdiff < 10 * hclose_check
+ ), "Max. diff. in spec. discharge (z) {} \
+ should be within solver tolerance (x10): {}".format(
+ maxdiff, 10 * hclose_check
+ )
+
+ # check budget error from .lst file
+ for mname in ["ref", "tl", "tr", "bl", "br"]:
+ fpth = os.path.join(test.workspace, f"{mname}.lst")
+ for line in open(fpth):
+ if line.lstrip().startswith("PERCENT"):
+ cumul_balance_error = float(line.split()[3])
+ assert (
+ abs(cumul_balance_error) < 0.00001
+ ), "Cumulative balance error = {} for {}, should equal 0.0".format(
+ cumul_balance_error, mname
+ )
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ )
+ test.run()
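check_output above stitches the four submodel arrays back into the 10x10 reference grid with nested np.append calls before differencing. An equivalent, more compact assembly using numpy block layout (stand-in arrays, for illustration only):

    import numpy as np

    # stand-ins for the (nlay=1, 5, 5) submodel head arrays
    heads_tl, heads_tr, heads_bl, heads_br = (
        np.full((1, 5, 5), v, dtype=float) for v in range(4)
    )

    # same result as appending tl|tr and bl|br along axis 1,
    # then stacking the two rows along axis 0
    heads_merged = np.block(
        [[heads_tl[0], heads_tr[0]], [heads_bl[0], heads_br[0]]]
    )
    assert heads_merged.shape == (10, 10)
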
diff --git a/autotest/test_gwf_ims_rcm_reorder.py b/autotest/test_gwf_ims_rcm_reorder.py
index b6b587e5b38..c5f39010094 100644
--- a/autotest/test_gwf_ims_rcm_reorder.py
+++ b/autotest/test_gwf_ims_rcm_reorder.py
@@ -3,11 +3,12 @@
import flopy
import pytest
from flopy.utils.compare import eval_bud_diff
+
from framework import TestFramework
-from simulation import TestSimulation
paktest = "ims"
-ex = ["ims_rcm"]
+cases = ["ims_rcm"]
+cmp_prefix = "mf6"
# spatial discretization data
nlay, nrow, ncol = 2, 5, 30
@@ -26,7 +27,7 @@ def build_model(idx, ws):
tdis_rc = [(1.0, 1, 1.0)]
# build MODFLOW 6 files
- name = ex[idx]
+ name = cases[idx]
sim = flopy.mf6.MFSimulation(
sim_name=name,
version="mf6",
@@ -41,7 +42,7 @@ def build_model(idx, ws):
perioddata=tdis_rc,
)
- if not ws.endswith("mf6"):
+ if not str(ws).endswith(cmp_prefix):
reordering_method = "rcm"
else:
reordering_method = None
@@ -111,29 +112,24 @@ def build_model(idx, ws):
return sim
-def build_models(idx, base_ws):
- sim = build_model(idx, base_ws)
-
- ws = os.path.join(base_ws, "mf6")
- mc = build_model(idx, ws)
-
- return sim, mc
-
+def build_models(idx, test):
+ return build_model(idx, test.workspace), build_model(
+ idx, os.path.join(test.workspace, cmp_prefix)
+ )
-def eval_flows(sim):
- name = sim.name
- print("evaluating flow results..." f"({name})")
- fpth = os.path.join(sim.simpath, f"{name}.dis.grb")
+def check_output(idx, test):
+ name = test.name
+ fpth = os.path.join(test.workspace, f"{name}.dis.grb")
ia = flopy.mf6.utils.MfGrdFile(fpth).ia
- fpth = os.path.join(sim.simpath, f"{name}.cbc")
+ fpth = os.path.join(test.workspace, f"{name}.cbc")
b0 = flopy.utils.CellBudgetFile(fpth, precision="double")
- fpth = os.path.join(sim.simpath, "mf6", f"{name}.cbc")
+ fpth = os.path.join(test.workspace, cmp_prefix, f"{name}.cbc")
b1 = flopy.utils.CellBudgetFile(fpth, precision="double")
- fpth = os.path.join(sim.simpath, f"{name}.cbc.cmp.out")
+ fpth = os.path.join(test.workspace, f"{name}.cbc.cmp.out")
eval_bud_diff(fpth, b0, b1, ia=ia)
# close the budget files
@@ -141,20 +137,13 @@ def eval_flows(sim):
b1.close()
-@pytest.mark.parametrize(
- "name",
- ex,
-)
-def test_mf6model(name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_models, 0, ws)
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_flows,
- idxsim=0,
- ),
- ws,
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_lak_bedleak.py b/autotest/test_gwf_lak_bedleak.py
new file mode 100644
index 00000000000..6a3a42d6830
--- /dev/null
+++ b/autotest/test_gwf_lak_bedleak.py
@@ -0,0 +1,179 @@
+import os
+
+import flopy
+import numpy as np
+import pytest
+
+from framework import DNODATA, TestFramework
+
+cases = ["bedleak", "bedleak_fail", "bedleak_none"]
+
+
+def build_models(idx, test):
+ nlay, nrow, ncol = 1, 10, 10
+ nper = 1
+ perlen = [
+ 1.0,
+ ]
+ nstp = [
+ 1,
+ ]
+ tsmult = [
+ 1.0,
+ ]
+
+ lenx = 300.0
+ delr = delc = lenx / float(nrow)
+ strt = 100.0
+
+ nouter, ninner = 100, 300
+ hclose, rclose, relax = 1e-9, 1e-3, 0.97
+
+ tdis_rc = []
+ for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
+
+ name = cases[idx]
+
+ # build MODFLOW 6 files
+ ws = test.workspace
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
+ )
+ # create tdis package
+ tdis = flopy.mf6.ModflowTdis(
+ sim,
+ time_units="DAYS",
+ nper=nper,
+ perioddata=tdis_rc,
+ )
+
+ # create iterative model solution and register the gwf model with it
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ print_option="SUMMARY",
+ outer_dvclose=hclose,
+ outer_maximum=nouter,
+ under_relaxation="DBD",
+ inner_maximum=ninner,
+ inner_dvclose=hclose,
+ rcloserecord=rclose,
+ linear_acceleration="BICGSTAB",
+ scaling_method="NONE",
+ reordering_method="NONE",
+ relaxation_factor=relax,
+ )
+
+ # create gwf model
+ gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ top=90.0,
+ botm=0.0,
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf, save_flows=True, icelltype=1, k=1.0, k33=0.01
+ )
+ # storage
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ save_flows=True,
+ iconvert=1,
+ ss=0.0,
+ sy=0.1,
+ steady_state={0: True},
+ )
+
+ # chd files
+ chdlist0 = []
+ chdlist0.append([(0, 0, 0), 100.0])
+ chdlist0.append([(0, nrow - 1, ncol - 1), 95.0])
+
+ chdspdict = {0: chdlist0}
+ chd = flopy.mf6.ModflowGwfchd(
+ gwf,
+ stress_period_data=chdspdict,
+ save_flows=False,
+ )
+
+ # lak package
+ if "fail" in name:
+ bedleak = -100.0
+ elif "none" in name:
+ bedleak = "none"
+ else:
+ bedleak = DNODATA
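+    # the "fail" case uses an invalid negative leakance and is expected to
+    # abort; DNODATA is expected to behave like "none" (no lakebed resistance)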
+
+    # packagedata: [ifno, strt, nlakeconn, boundname]
+ packagedata = [
+ [0, 100.0, 1, "lake1"],
+ [1, 100.0, 1, "lake2"],
+ ]
+    # connectiondata: [ifno, iconn, cellid, claktype, bedleak,
+    #                  belev, telev, connlen, connwidth]
+ connectiondata = [
+ [0, 0, (0, 1, 1), "vertical", bedleak, 0.0, 0.0, 0.0, 0.0],
+ [1, 0, (0, 2, 2), "vertical", bedleak, 0.0, 0.0, 0.0, 0.0],
+ ]
+ lak = flopy.mf6.ModflowGwflak(
+ gwf,
+ boundnames=True,
+ surfdep=1.0,
+ print_input=True,
+ print_stage=True,
+ print_flows=True,
+ save_flows=True,
+ budget_filerecord=f"{name}.lak.bud",
+ nlakes=len(packagedata),
+ packagedata=packagedata,
+ connectiondata=connectiondata,
+ )
+ # lak.remove()
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{name}.cbc",
+ headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
+ saverecord=[("BUDGET", "ALL")],
+ printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ )
+
+ return sim, None
+
+
+def check_output(idx, test):
+ name = cases[idx]
+
+ # lak budget
+ if "fail" not in name:
+ fpth = os.path.join(test.workspace, f"{name}.lak.bud")
+ bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
+ bobj.list_unique_records()
+ records = bobj.get_data(text="GWF")
+ for r in records:
+ assert np.allclose(r["q"][0], -4.79616347e-12)
+ assert np.allclose(r["q"][1], -6.19237994e-12)
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+        # the temp dir name includes the parametrized case name, so the
+        # "bedleak_fail" case is marked as an expected failure
+        xfail="fail" in str(function_tmpdir),
+ )
+ test.run()
diff --git a/autotest/test_gwf_lak_wetlakbedarea01.py b/autotest/test_gwf_lak_wetlakbedarea01.py
index cba89465af3..5a0043a0cc0 100644
--- a/autotest/test_gwf_lak_wetlakbedarea01.py
+++ b/autotest/test_gwf_lak_wetlakbedarea01.py
@@ -1,19 +1,21 @@
-# A simple 2 layer by 1 row by 2 column model. Upper-right cell is the only
-# active LAK cell. Lake starts out initially dry and then is wetted by a
-# rising water table. A constant head boundary in the lower left corner cell
-# is used to raise water table. This autotest checks to ensure that the wetted
-# areas between the lake and the 2 connected cells (1 vertical, 1 horizontal)
-# is correct.
+"""
+A simple 2 layer by 1 row by 2 column model. The upper-right cell is the
+only active LAK cell. The lake starts out dry and is then wetted by a
+rising water table. A constant head boundary in the lower-left corner cell
+is used to raise the water table. This autotest checks that the wetted
+areas between the lake and the 2 connected cells (1 vertical, 1 horizontal)
+are correct.
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["lak-1cellkbd"]
+cases = ["lak-1cellkbd"]
# Model units
length_units = "feet"
@@ -29,15 +31,15 @@
k33 = [1179.0, 1179.0]
ss = 3e-4
sy = 0.2
-chd_lr = 9.0
+chd_lr = 9.0
lak_strt = 9.0 # Starting lake stage
lak_bedleak = 10.0 # Lakebed leakance
-idomain = np.full((nlay, nrow, ncol), 1)
+idomain = np.full((nlay, nrow, ncol), 1)
idomain[0, 0, 1] = 0 # deactivate upper-right corner of 2x1x2 model
-top = 20.
-botm = [10., 0.]
+top = 20.0
+botm = [10.0, 0.0]
# define delr and delc
delr = 10.0
@@ -62,7 +64,18 @@
# Prepare constant head boundary data information
chd_spd = {}
-chd_inc = [9.999999, 10.0, 10.000001, 10.00001, 10.0001, 10.001, 10.01, 10.1, 10.11, 10.12]
+chd_inc = [
+ 9.999999,
+ 10.0,
+ 10.000001,
+ 10.00001,
+ 10.0001,
+ 10.001,
+ 10.01,
+ 10.1,
+ 10.11,
+ 10.12,
+]
for i, t in enumerate(range(len(perlen))):
chd_spd.update({i: [nlay - 1, nrow - 1, 0, chd_inc[i]]})
@@ -85,7 +98,7 @@
[10.25, 0.0280716, 0.4, 0.4],
[10.3, 0.068889924, 0.5, 0.5],
[10.35, 0.1690610, 0.6, 0.6],
- [10.4, 0.4148885490, 0.7, 0.7]
+ [10.4, 0.4148885490, 0.7, 0.7],
]
# Set solver parameters
@@ -95,12 +108,14 @@
rclose = 1e-6
relax = 0.97
+
def resolve_lvl(stg, hd, toplay):
ss = min(stg, toplay)
hh = min(hd, toplay)
thk = max(ss, hh)
return thk
+
def calc_qSat(top, bot, thk):
teps = 1e-6
tbmin = 0.0
@@ -135,13 +150,11 @@ def calc_qSat(top, bot, thk):
return y
-#
-# MODFLOW 6 flopy GWF object
-#
-def build_model(idx, dir):
+
+def build_models(idx, test):
# Base simulation and model name and workspace
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
print("Building model...{}".format(name))
@@ -159,7 +172,7 @@ def build_model(idx, dir):
ats_filerecord=ats_filerecord,
nper=nper,
perioddata=tdis_rc,
- time_units=time_units
+ time_units=time_units,
)
if True:
@@ -231,18 +244,34 @@ def build_model(idx, dir):
# Instantiate constant head boundary package
flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd)
-
+
# Instantiate LAK package
lak_conn = []
if use_embedded_lak:
- lak_conn.append([0, 0, (0, 0, 1), 'embeddedv', lak_bedleak, 0.0, 0.0, 1.0, 0.0])
+ lak_conn.append(
+ [0, 0, (0, 0, 1), "embeddedv", lak_bedleak, 0.0, 0.0, 1.0, 0.0]
+ )
else:
- lak_conn.append([0, 0, (0, 0, 0), 'horizontal', lak_bedleak, 10.0, 20.0, 10.0, 10.0])
- lak_conn.append([0, 1, (1, 0, 1), 'vertical', lak_bedleak, 0.0, 0.0, 0.0, 0.0])
+ lak_conn.append(
+ [
+ 0,
+ 0,
+ (0, 0, 0),
+ "horizontal",
+ lak_bedleak,
+ 10.0,
+ 20.0,
+ 10.0,
+ 10.0,
+ ]
+ )
+ lak_conn.append(
+ [0, 1, (1, 0, 1), "vertical", lak_bedleak, 0.0, 0.0, 0.0, 0.0]
+ )
lak_packagedata = [0, lak_strt, len(lak_conn)]
budpth = f"{gwfname}.lak.cbc"
- tab6_filename = '{}.laktab'.format(gwfname)
+ tab6_filename = "{}.laktab".format(gwfname)
if use_embedded_lak:
# LAK package input requires tables option when using embedded lakes.
lak = flopy.mf6.ModflowGwflak(
@@ -277,7 +306,7 @@ def build_model(idx, dir):
budget_filerecord=budpth,
time_conversion=86400,
length_conversion=3.28081,
- #surfdep=0.05,
+ # surfdep=0.05,
pname="LAK-1",
filename="{}.lak".format(gwfname),
)
@@ -289,23 +318,22 @@ def build_model(idx, dir):
]
}
lak.obs.initialize(
- filename=obs_file,
- digits=10,
- print_input=True,
- continuous=obs_dict
+ filename=obs_file, digits=10, print_input=True, continuous=obs_dict
)
if use_embedded_lak:
tabinput = []
for itm in lak_tab:
tabinput.append([itm[0], itm[1], itm[2], itm[3]])
-
- laktab = flopy.mf6.ModflowUtllaktab(gwf,
- nrow=len(tabinput),
- ncol=len(tabinput[0]),
- table=tabinput,
- filename=tab6_filename,
- pname='LAK_tab',
- parent_file=lak)
+
+ laktab = flopy.mf6.ModflowUtllaktab(
+ gwf,
+ nrow=len(tabinput),
+ ncol=len(tabinput[0]),
+ table=tabinput,
+ filename=tab6_filename,
+ pname="LAK_tab",
+ parent_file=lak,
+ )
# Instantiate output control package
head_filerecord = "{}.hds".format(gwfname)
@@ -315,38 +343,38 @@ def build_model(idx, dir):
head_filerecord=head_filerecord,
budget_filerecord=budget_filerecord,
saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
- printrecord=[("HEAD", "ALL")]
+ printrecord=[("HEAD", "ALL")],
)
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# read flow results from model
- name = ex[sim.idxsim]
+ name = cases[idx]
gwfname = "gwf-" + name
# read flow results from model
- sim1 = flopy.mf6.MFSimulation.load(sim_ws=sim.simpath, load_only=["dis"])
+ sim1 = flopy.mf6.MFSimulation.load(
+ sim_ws=test.workspace, load_only=["dis"]
+ )
gwf = sim1.get_model(gwfname)
# get final lake stage
- lk_pth0 = os.path.join(sim.simpath, f"{gwfname}.lak.obs.csv")
+ lk_pth0 = os.path.join(test.workspace, f"{gwfname}.lak.obs.csv")
lkstg = np.genfromtxt(lk_pth0, names=True, delimiter=",")
lkstg_time = lkstg["time"].tolist()
lkstg_val = lkstg["STAGE"].tolist()
# Store only the values at the end of the time step
- idx = [i for i, val in enumerate(lkstg_time) if not val.is_integer()]
- for i in idx[::-1]:
+ indices = [i for i, val in enumerate(lkstg_time) if not val.is_integer()]
+ for i in indices[::-1]:
lkstg_time.pop(i)
lkstg_val.pop(i)
# Get heads
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hdobj = flopy.utils.binaryfile.HeadFile(fname, precision="double")
@@ -354,7 +382,7 @@ def eval_results(sim):
# Get lake/gwf exchange information
fname = gwfname + ".lak.cbc"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
lakobj = flopy.utils.binaryfile.CellBudgetFile(fname, precision="double")
@@ -370,8 +398,10 @@ def eval_results(sim):
wetted_out = np.array(wetted_out)
# Compare MF6 output to answer calculated here
- msg = 'Compare value written by MF6 to a value calculated here based on ' \
- 'either lake stage or gw head'
+ msg = (
+ "Compare value written by MF6 to a value calculated here based on "
+ "either lake stage or gw head"
+ )
for tm in np.arange(wetted_out.shape[0]):
for conn in np.arange(wetted_out.shape[1]):
stg = lkstg_val[tm]
@@ -410,17 +440,14 @@ def eval_results(sim):
monotonicIncrease = np.diff(wetted_out[2:, 0])
assert np.all(monotonicIncrease > 0), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=idx
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_lak_wetlakbedarea02.py b/autotest/test_gwf_lak_wetlakbedarea02.py
index 91e83afc917..b72807e67df 100644
--- a/autotest/test_gwf_lak_wetlakbedarea02.py
+++ b/autotest/test_gwf_lak_wetlakbedarea02.py
@@ -1,17 +1,19 @@
-# An adaptation of the LAK package problem 1 supplemented with an additional
-# layer that has variable thinkness to help test that the shared wetted area
-# between a lakebed and groundwater cells in contact with the lake are written
-# to the LAK cbc output file correctly.
+"""
+An adaptation of the LAK package problem 1 supplemented with an additional
+layer of variable thickness to help test that the shared wetted areas
+between the lakebed and groundwater cells in contact with the lake are
+written to the LAK cbc output file correctly.
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["lak-wetlkbd"]
+cases = ["lak-wetlkbd"]
# Model units
length_units = "feet"
@@ -201,10 +203,10 @@ def calc_qSat(top, bot, thk):
#
-def build_model(idx, dir):
+def build_models(idx, test):
# Base simulation and model name and workspace
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
print("Building model...{}".format(name))
@@ -330,25 +332,25 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# read flow results from model
- name = ex[sim.idxsim]
+ name = cases[idx]
gwfname = "gwf-" + name
# read flow results from model
- sim1 = flopy.mf6.MFSimulation.load(sim_ws=sim.simpath, load_only=["dis"])
+ sim1 = flopy.mf6.MFSimulation.load(
+ sim_ws=test.workspace, load_only=["dis"]
+ )
gwf = sim1.get_model(gwfname)
# get final lake stage
- lk_pth0 = os.path.join(sim.simpath, f"{gwfname}.lak.obs.csv")
+ lk_pth0 = os.path.join(test.workspace, f"{gwfname}.lak.obs.csv")
lkstg = np.genfromtxt(lk_pth0, names=True, delimiter=",")
lkstg_val = lkstg["STAGE"]
# Get heads
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hdobj = flopy.utils.binaryfile.HeadFile(fname, precision="double")
@@ -356,7 +358,7 @@ def eval_results(sim):
# Get lake/gwf exchange information
fname = gwfname + ".lak.cbc"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
lakobj = flopy.utils.binaryfile.CellBudgetFile(fname, precision="double")
@@ -373,7 +375,7 @@ def eval_results(sim):
"The wetted interfacial areas saved in the binary output file "
"(.cbc) do not match the values calculated in the autotest script"
)
- for idx, itm in enumerate(lak_con):
+ for ii, itm in enumerate(lak_con):
k, i, j = itm[2]
ctype = itm[3]
if ctype[0] == "h":
@@ -393,20 +395,16 @@ def eval_results(sim):
width = delc[i]
warea = length * width
- assert np.isclose(warea, checks_out[idx], atol=1e-5), msg
+ assert np.isclose(warea, checks_out[ii], atol=1e-5), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=idx
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_laket.py b/autotest/test_gwf_laket.py
index b180ff3baed..91f00bd8d48 100644
--- a/autotest/test_gwf_laket.py
+++ b/autotest/test_gwf_laket.py
@@ -1,17 +1,15 @@
-# Test for checking lak evaporation.
+"""Test for checking lak evaporation."""
import os
-import shutil
-import sys
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = [
+cases = [
"gwf_laket01",
"gwf_laket02",
"gwf_laket03",
@@ -23,7 +21,7 @@
def get_model(idx, ws):
- name = ex[idx]
+ name = cases[idx]
nlay = 1
nrow = 1
ncol = 1
@@ -112,7 +110,7 @@ def get_model(idx, ws):
]
nlakeconn = 1
- # pak_data = [lakeno, strt, nlakeconn]
+ # pak_data = [ifno, strt, nlakeconn]
pak_data = [(0, lakestage[idx], nlakeconn)]
belev = top
@@ -175,18 +173,14 @@ def get_model(idx, ws):
return sim
-def build_model(idx, dir):
-
+def build_models(idx, test):
# build MODFLOW 6 files
- sim = get_model(idx, dir)
-
+ sim = get_model(idx, test.workspace)
return sim, None
-def eval_laket(sim):
- msg = "Evaluating Lake ET. "
-
- fpth = os.path.join(sim.simpath, f"{sim.name}.lak.obs.csv")
+def check_output(idx, test):
+    msg = "Evaluating Lake ET."
+    fpth = os.path.join(test.workspace, f"{test.name}.lak.obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
@@ -206,7 +200,7 @@ def eval_laket(sim):
(8.000000000000, 5.2000000000000028, -0.1),
(9.000000000000, 5.1000000000000032, -0.1),
(10.00000000000, 5.0000000000000036, -0.1),
- (11.00000000000, 5.0000000000000000, 0.0),
+ (11.00000000000, 5.0000000000000000, 0.0),
(12.00000000000, 5.0999999999999996, -0.1),
(13.00000000000, 5.1999999999999993, -0.1),
(14.00000000000, 5.2999999999999989, -0.1),
@@ -257,10 +251,10 @@ def eval_laket(sim):
(5.000000000000, 5.1402629856607369, -0.1),
(6.000000000000, 5.0345111305663464, -0.1),
(7.000000000000, 5.0000000000000000, -0.0345111305663464),
- (8.000000000000, 5.0000000000000000, 0.0),
- (9.000000000000, 5.0000000000000000, 0.0),
- (10.00000000000, 5.0000000000000000, 0.0),
- (11.00000000000, 5.0000000000000000, 0.0),
+ (8.000000000000, 5.0000000000000000, 0.0),
+ (9.000000000000, 5.0000000000000000, 0.0),
+ (10.00000000000, 5.0000000000000000, 0.0),
+ (11.00000000000, 5.0000000000000000, 0.0),
(12.00000000000, 5.0857142857142854, -0.1),
(13.00000000000, 5.1591836734693874, -0.1),
(14.00000000000, 5.2221574344023320, -0.1),
@@ -277,41 +271,34 @@ def eval_laket(sim):
),
}
- if sim.idxsim in (
+ if idx in (
0,
1,
2,
):
- evap_compare = np.allclose(obs[sim.idxsim]["evap"], tc["EVAP"])
- stage_compare = np.allclose(obs[sim.idxsim]["stage"], tc["LAKESTAGE"])
+ evap_compare = np.allclose(obs[idx]["evap"], tc["EVAP"])
+ stage_compare = np.allclose(obs[idx]["stage"], tc["LAKESTAGE"])
else:
evap_compare = True
stage_compare = True
- sim.success = True
+ test.success = True
if not evap_compare:
- sim.success = False
+ test.success = False
msg += f" Lake evaporation comparison failed."
if not stage_compare:
- sim.success = False
+ test.success = False
msg += f" Lake stage comparison failed."
- assert sim.success, msg
+ assert test.success, msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name,
- exe_dict=targets,
- exfunc=eval_laket,
- idxsim=idx,
- mf6_regression=False,
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
)
+ test.run()
diff --git a/autotest/test_gwf_lakobs01.py b/autotest/test_gwf_lakobs01.py
index 8cfc8ff2de9..cff4ab1881d 100644
--- a/autotest/test_gwf_lakobs01.py
+++ b/autotest/test_gwf_lakobs01.py
@@ -1,21 +1,19 @@
-# Test for checking lak observation input. The following observation types:
-# 'lak', 'wetted-area', and 'conductance,' require that ID2 be provided when
-# ID is an integer corresponding to a lake number and not BOUNDNAME.
-# See table in LAK Package section of mf6io.pdf for an explanation of ID,
-# ID2, and Observation Type.
+"""
+Test for checking lak observation input. The observation types 'lak',
+'wetted-area', and 'conductance' require that ID2 be provided when
+ID is an integer corresponding to a lake number and not BOUNDNAME.
+See table in LAK Package section of mf6io.pdf for an explanation of ID,
+ID2, and Observation Type.
+"""
import os
-import shutil
-import sys
import flopy
import numpy as np
import pytest
-from framework import TestFramework
-from simulation import TestSimulation
-ex = "gwf_lakobs_01a"
+cases = "gwf_lakobs_01a"
gwf = None
@@ -54,7 +52,7 @@ def build_model(dir, exe):
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
- name = ex
+ name = cases
# build MODFLOW 6 files
sim = flopy.mf6.MFSimulation(
@@ -138,7 +136,7 @@ def build_model(dir, exe):
irch[i, j] = k + 1
nlakeconn = len(lake_vconnect)
- # pak_data = [lakeno, strt, nlakeconn]
+ # pak_data = [ifno, strt, nlakeconn]
initial_stage = 0.1
pak_data = [(0, initial_stage, nlakeconn)]
@@ -206,13 +204,8 @@ def build_model(dir, exe):
def test_mf6model(function_tmpdir, targets):
- mf6 = targets["mf6"]
-
- # initialize testing framework
- test = TestFramework()
-
# build the models
- sim = build_model(str(function_tmpdir), mf6)
+ sim = build_model(str(function_tmpdir), targets["mf6"])
# write model input
sim.write_simulation()
@@ -235,8 +228,8 @@ def test_mf6model(function_tmpdir, targets):
)
# fix the error and attempt to rerun model
- orig_fl = str(function_tmpdir / (ex + ".lak.obs"))
- new_fl = str(function_tmpdir / (ex + ".lak.obs.new"))
+ orig_fl = str(function_tmpdir / (cases + ".lak.obs"))
+ new_fl = str(function_tmpdir / (cases + ".lak.obs.new"))
sr = open(orig_fl, "r")
sw = open(new_fl, "w")
diff --git a/autotest/test_gwf_libmf6_evt01.py b/autotest/test_gwf_libmf6_evt01.py
index b962f653f30..067e7ac7ae4 100644
--- a/autotest/test_gwf_libmf6_evt01.py
+++ b/autotest/test_gwf_libmf6_evt01.py
@@ -1,8 +1,6 @@
"""
-MODFLOW 6 Autotest
-Test the bmi which is used update the calculate a head-based pumping rate that
-is equivalent to use of the evapotranspiration package in the
-non-bmi simulation.
+Test bmi with a head-based pumping rate equivalent to
+the evapotranspiration package in a non-bmi simulation.
"""
import os
@@ -10,11 +8,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_evt01"]
+from framework import TestFramework
+
+cases = ["libgwf_evt01"]
# et variables
et_max = 0.1
@@ -128,14 +126,14 @@ def get_model(ws, name, bmi=False):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
sim = get_model(ws, name)
# build comparison model
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
mc = get_model(ws, name, bmi=True)
return sim, mc
@@ -153,7 +151,7 @@ def head2et_wellrate(h):
def api_func(exe, idx, model_ws=None):
- name = ex[idx].upper()
+ name = cases[idx].upper()
if model_ws is None:
model_ws = "."
output_file_path = os.path.join(model_ws, "mfsim.stdout")
@@ -184,7 +182,7 @@ def api_func(exe, idx, model_ws=None):
max_iter = mf6.get_value(mxit_tag)
# get copy of well data
- well_tag = mf6.get_var_address("BOUND", name, "WEL_0")
+ well_tag = mf6.get_var_address("Q", name, "WEL_0")
well = mf6.get_value(well_tag)
# check NPF type
@@ -202,7 +200,6 @@ def api_func(exe, idx, model_ws=None):
# model time loop
idx = 0
while current_time < end_time:
-
# get dt and prepare for non-linear iterations
dt = mf6.get_time_step()
mf6.prepare_time_step(dt)
@@ -212,10 +209,9 @@ def api_func(exe, idx, model_ws=None):
mf6.prepare_solve()
while kiter < max_iter:
-
# update well rate
twell[:] = head2et_wellrate(head[0])
- well[:, 0] = twell[:]
+ well[:] = twell[:]
mf6.set_value(well_tag, well)
# solve with updated well rate
@@ -254,16 +250,13 @@ def api_func(exe, idx, model_ws=None):
return True, open(output_file_path).readlines()
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
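Note the shift in how the libmf6 API tests address package data: instead of writing the packed BOUND array, they now get and set named package arrays (Q for WEL, RECHARGE for RCH, STAGE/COND/RBOT for RIV). A minimal sketch of that access pattern through modflowapi, with a hypothetical library path and the model/package names used purely for illustration:

    from modflowapi import ModflowApi

    mf6 = ModflowApi("libmf6.so", working_directory=".")  # hypothetical path
    mf6.initialize()

    # address a package array by variable name, model name, and package name
    q_tag = mf6.get_var_address("Q", "LIBGWF_EVT01", "WEL_0")
    q = mf6.get_value(q_tag)  # one entry per boundary in the package
    q[:] = -0.25  # hypothetical pumping rate
    mf6.set_value(q_tag, q)

    # advance the simulation (or use the prepare_time_step/prepare_solve/
    # solve loop shown in the api_func routines of these tests)
    mf6.update()
    mf6.finalize()
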
diff --git a/autotest/test_gwf_libmf6_ghb01.py b/autotest/test_gwf_libmf6_ghb01.py
index 43e7ce239f5..4809b430c45 100644
--- a/autotest/test_gwf_libmf6_ghb01.py
+++ b/autotest/test_gwf_libmf6_ghb01.py
@@ -1,21 +1,19 @@
"""
-MODFLOW 6 Autotest
-Test the api which is used update to set the simulate the effect of a general
-head boundary (ghb) at the downgradient end of the model with a head below the
-bottom of the cell. The api results are compared to a non-api simulation that
-uses the well package to simulate the effect of the same ghb. This is a
-possible solution to https://github.com/MODFLOW-USGS/modflow6/issues/724
+Simulate the effect of a general head boundary (ghb) at the downgradient end
+of the model with a head below the bottom of the cell. Compare the api results
+to a non-api simulation that uses the well package to simulate an equivalent ghb.
+Possible solution to https://github.com/MODFLOW-USGS/modflow6/issues/724
"""
import os
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_ghb01"]
+from framework import TestFramework
+
+cases = ["libgwf_ghb01"]
# temporal discretization
nper = 10
@@ -162,15 +160,14 @@ def get_model(ws, name, api=False):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
-
+ ws = test.workspace
+ name = cases[idx]
sim = get_model(ws, name)
# build comparison model with zeroed values
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
mc = get_model(ws, name, api=True)
return sim, mc
@@ -183,7 +180,7 @@ def api_ghb_pak(hcof, rhs):
def api_func(exe, idx, model_ws=None):
- name = ex[idx].upper()
+ name = cases[idx].upper()
if model_ws is None:
model_ws = "."
output_file_path = os.path.join(model_ws, "mfsim.stdout")
@@ -228,7 +225,6 @@ def api_func(exe, idx, model_ws=None):
# model time loop
while current_time < end_time:
-
# get dt
dt = mf6.get_time_step()
@@ -277,16 +273,13 @@ def api_func(exe, idx, model_ws=None):
return True, open(output_file_path).readlines()
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ targets=targets,
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
diff --git a/autotest/test_gwf_libmf6_ifmod01.py b/autotest/test_gwf_libmf6_ifmod01.py
index 51577ed622f..6ffe87935c3 100644
--- a/autotest/test_gwf_libmf6_ifmod01.py
+++ b/autotest/test_gwf_libmf6_ifmod01.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the interface model approach for coupling two gwf models.
We need the API for this, as the interface model is hidden and
not present in any of the output. The setup is two coupled
@@ -16,19 +15,16 @@
import flopy
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_ifmod01"]
+from framework import TestFramework
-# global convenience...
+cases = ["libgwf_ifmod01"]
name_left = "leftmodel"
name_right = "rightmodel"
def get_model(dir, name):
-
useXT3D = True
# parameters and spd
@@ -197,14 +193,14 @@ def get_model(dir, name):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
sim = get_model(ws, name)
# build comparison model
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
sim_compare = get_model(ws, name)
return sim, sim_compare
@@ -307,17 +303,13 @@ def check_interface_models(mf6):
), "AREA in interface model does not match"
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ targets=targets,
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
diff --git a/autotest/test_gwf_libmf6_ifmod02.py b/autotest/test_gwf_libmf6_ifmod02.py
index 5faa7d76512..b9f6462ddd3 100644
--- a/autotest/test_gwf_libmf6_ifmod02.py
+++ b/autotest/test_gwf_libmf6_ifmod02.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the interface model approach for an inhomogeneous coupling
of three gwf models using the API. One exchange will have XT3D
enabled (Exg1) and the other one (Exg2) doesn't. And the top-left
@@ -41,11 +40,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_ifmod02"]
+from framework import TestFramework
+
+cases = ["libgwf_ifmod02"]
# global convenience...
name_tl = "topleft"
@@ -54,7 +53,6 @@
def get_model(dir, name):
-
# parameters and spd
# tdis
nper = 1
@@ -291,14 +289,14 @@ def get_model(dir, name):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
sim = get_model(ws, name)
# build comparison model
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
sim_compare = get_model(ws, name)
return sim, sim_compare
@@ -403,17 +401,14 @@ def check_interface_models(mf6):
)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
@pytest.mark.developmode
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ targets=targets,
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
diff --git a/autotest/test_gwf_libmf6_ifmod03.py b/autotest/test_gwf_libmf6_ifmod03.py
index c238d269110..f5d24a342db 100644
--- a/autotest/test_gwf_libmf6_ifmod03.py
+++ b/autotest/test_gwf_libmf6_ifmod03.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the interface model approach for coupling two DIS
models where one is translated and rotated in space:
@@ -31,11 +30,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_ifmod03"]
+from framework import TestFramework
+
+cases = ["libgwf_ifmod03"]
# global convenience...
name_left = "left"
@@ -44,12 +43,11 @@
def get_model(dir, name):
-
# parameters and spd
# tdis
nper = 1
tdis_rc = []
- for i in range(nper):
+ for _ in range(nper):
tdis_rc.append((1.0, 1, 1))
# solver data
@@ -208,14 +206,14 @@ def get_model(dir, name):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
sim = get_model(ws, name)
# build comparison model
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
sim_compare = get_model(ws, name)
return sim, sim_compare
@@ -289,16 +287,13 @@ def check_interface_models(mf6):
assert abs(ymax - ymin) < 1e-6
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
diff --git a/autotest/test_gwf_libmf6_rch01.py b/autotest/test_gwf_libmf6_rch01.py
index bb8252e8cff..b7ef4c50e6c 100644
--- a/autotest/test_gwf_libmf6_rch01.py
+++ b/autotest/test_gwf_libmf6_rch01.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test to make sure that recharge is passed to the highest active layer and
verify that recharge is in the highest active layer by looking at the
individual budget terms. For this test, there are two layers and five
@@ -13,11 +12,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_rch01"]
+from framework import TestFramework
+
+cases = ["libgwf_rch01"]
# recharge package name
rch_pname = "RCH-1"
@@ -135,21 +134,21 @@ def get_model(ws, name, rech):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
sim = get_model(ws, name, rech=rch_spd)
# build comparison model
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
mc = get_model(ws, name, rech=0.0)
return sim, mc
def api_func(exe, idx, model_ws=None):
- name = ex[idx].upper()
+ name = cases[idx].upper()
if model_ws is None:
model_ws = "."
@@ -177,13 +176,12 @@ def api_func(exe, idx, model_ws=None):
max_iter = mf6.get_value(mxit_tag)
# get copy of recharge array
- rch_tag = mf6.get_var_address("BOUND", name, rch_pname)
+ rch_tag = mf6.get_var_address("RECHARGE", name, rch_pname)
new_recharge = mf6.get_value(rch_tag)
# model time loop
idx = 0
while current_time < end_time:
-
# get dt and prepare for non-linear iterations
dt = mf6.get_time_step()
mf6.prepare_time_step(dt)
@@ -193,7 +191,7 @@ def api_func(exe, idx, model_ws=None):
mf6.prepare_solve()
# update recharge
- new_recharge[:, 0] = rch_spd[idx] * area
+ new_recharge[:] = rch_spd[idx]
mf6.set_value(rch_tag, new_recharge)
while kiter < max_iter:
@@ -232,16 +230,13 @@ def api_func(exe, idx, model_ws=None):
return True, open(output_file_path).readlines()
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
diff --git a/autotest/test_gwf_libmf6_rch02.py b/autotest/test_gwf_libmf6_rch02.py
index 1e7a5797a3e..fc1b4c6fdf9 100644
--- a/autotest/test_gwf_libmf6_rch02.py
+++ b/autotest/test_gwf_libmf6_rch02.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the bmi which is used to calculate a recharge rate that results in a
simulated head in the center of the model domain to be equal to the
simulated head in the non-bmi simulation.
@@ -10,11 +9,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_rch02"]
+from framework import TestFramework
+
+cases = ["libgwf_rch02"]
# recharge package name
rch_pname = "RCH-1"
@@ -153,26 +152,25 @@ def get_model(ws, name, exe, rech=rch_spd):
return sim
-def build_model(idx, dir, exe):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
- sim = get_model(ws, name, exe)
+ ws = test.workspace
+ name = cases[idx]
+ sim = get_model(ws, name, "mf6")
# build comparison model
- ws = os.path.join(dir, "libmf6")
- mc = get_model(ws, name, exe, rech=0.0)
+ ws = os.path.join(test.workspace, "libmf6")
+ mc = get_model(ws, name, "mf6", rech=0.0)
return sim, mc
def run_perturbation(mf6, max_iter, recharge, tag, rch):
-
mf6.prepare_solve()
kiter = 0
while kiter < max_iter:
# update recharge
- recharge[:, 0] = rch * area
+ recharge[:] = rch
mf6.set_value(tag, recharge)
# solve with updated well rate
has_converged = mf6.solve()
@@ -185,7 +183,7 @@ def run_perturbation(mf6, max_iter, recharge, tag, rch):
def api_func(exe, idx, model_ws=None):
print("\nBMI implementation test:")
- name = ex[idx].upper()
+ name = cases[idx].upper()
init_wd = os.path.abspath(os.getcwd())
if model_ws is not None:
os.chdir(model_ws)
@@ -193,7 +191,7 @@ def api_func(exe, idx, model_ws=None):
output_file_path = os.path.join(model_ws, "mfsim.stdout")
# get the observations from the standard run
- fpth = os.path.join("..", f"{ex[idx]}.head.obs.csv")
+ fpth = os.path.join("..", f"{cases[idx]}.head.obs.csv")
hobs = np.genfromtxt(fpth, delimiter=",", names=True)["H1_6_6"]
try:
@@ -222,7 +220,7 @@ def api_func(exe, idx, model_ws=None):
max_iter = mf6.get_value(mxit_tag)
# get copy of recharge array
- rch_tag = mf6.get_var_address("BOUND", name, rch_pname)
+ rch_tag = mf6.get_var_address("RECHARGE", name, rch_pname)
new_recharge = mf6.get_value(rch_tag).copy()
# determine initial recharge value
@@ -232,7 +230,6 @@ def api_func(exe, idx, model_ws=None):
# model time loop
idx = 0
while current_time < end_time:
-
# target head
htarget = hobs[idx]
@@ -308,20 +305,13 @@ def api_func(exe, idx, model_ws=None):
return True, open(output_file_path).readlines()
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(
- lambda i, d: build_model(i, d, targets["mf6"]),
- idx,
- str(function_tmpdir),
- )
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ targets=targets,
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
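
The change from the packed BOUND array to the named RECHARGE variable above means the array fetched through the API is the per-cell recharge rate itself, so the test no longer multiplies by cell area. A condensed sketch of the addressing and update pattern, assuming a model named LIBGWF_RCH02, a recharge package named RCH-1, and an illustrative library path:

    from modflowapi import ModflowApi

    mf6 = ModflowApi("./libmf6.so")   # path to the shared library (illustrative)
    mf6.initialize()

    # address the named RECHARGE variable instead of a column of BOUND
    rch_tag = mf6.get_var_address("RECHARGE", "LIBGWF_RCH02", "RCH-1")
    new_recharge = mf6.get_value(rch_tag).copy()

    # set a uniform rate per unit area (no multiplication by cell area needed)
    new_recharge[:] = 1.0e-3
    mf6.set_value(rch_tag, new_recharge)

    mf6.finalize()
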
diff --git a/autotest/test_gwf_libmf6_riv01.py b/autotest/test_gwf_libmf6_riv01.py
index 0fc8dd7e598..ef42e2e29d7 100644
--- a/autotest/test_gwf_libmf6_riv01.py
+++ b/autotest/test_gwf_libmf6_riv01.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the BMI, which is used to set the river stages to the same
values as in the non-BMI simulation.
"""
@@ -8,11 +7,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_riv01"]
+from framework import TestFramework
+
+cases = ["libgwf_riv01"]
# temporal discretization
nper = 10
@@ -129,10 +128,10 @@ def get_model(ws, name, riv_spd):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
# create river data
rd = [
@@ -146,7 +145,7 @@ def build_model(idx, dir):
sim = get_model(ws, name, riv_spd={0: rd, 5: rd2})
# build comparison model with zeroed values
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
rd_bmi = [[(0, 0, icol), 999.0, 999.0, 0.0] for icol in range(1, ncol - 1)]
mc = get_model(ws, name, riv_spd={0: rd_bmi})
@@ -154,7 +153,7 @@ def build_model(idx, dir):
def api_func(exe, idx, model_ws=None):
- name = ex[idx].upper()
+ name = cases[idx].upper()
if model_ws is None:
model_ws = "."
@@ -178,13 +177,16 @@ def api_func(exe, idx, model_ws=None):
end_time = mf6.get_end_time()
# get copy of (multi-dim) array with river parameters
- riv_tag = mf6.get_var_address("BOUND", name, riv_packname)
- new_spd = mf6.get_value(riv_tag)
+ stage_tag = mf6.get_var_address("STAGE", name, riv_packname)
+ cond_tag = mf6.get_var_address("COND", name, riv_packname)
+ rbot_tag = mf6.get_var_address("RBOT", name, riv_packname)
+ new_stage = mf6.get_value(stage_tag)
+ new_cond = mf6.get_value(cond_tag)
+ new_rbot = mf6.get_value(rbot_tag)
# model time loop
idx = 0
while current_time < end_time:
-
# get dt
dt = mf6.get_time_step()
@@ -192,16 +194,18 @@ def api_func(exe, idx, model_ws=None):
mf6.prepare_time_step(dt)
# set the RIV data through the BMI
+ # change cond and rbot data
+ new_cond[:] = [riv_cond]
+ new_rbot[:] = [riv_bot]
+ mf6.set_value(cond_tag, new_cond)
+ mf6.set_value(rbot_tag, new_rbot)
+ # change stage data
if current_time < 5:
- # set columns of BOUND data (we're setting entire columns of the
- # 2D array for convenience, setting only the value for the active
- # stress period should work too)
- new_spd[:] = [riv_stage, riv_cond, riv_bot]
- mf6.set_value(riv_tag, new_spd)
+ new_stage[:] = [riv_stage]
+ mf6.set_value(stage_tag, new_stage)
else:
- # change only stage data
- new_spd[:] = [riv_stage2, riv_cond, riv_bot]
- mf6.set_value(riv_tag, new_spd)
+ new_stage[:] = [riv_stage2]
+ mf6.set_value(stage_tag, new_stage)
kiter = 0
mf6.prepare_solve()
@@ -242,16 +246,13 @@ def api_func(exe, idx, model_ws=None):
return True, open(output_file_path).readlines()
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
diff --git a/autotest/test_gwf_libmf6_riv02.py b/autotest/test_gwf_libmf6_riv02.py
index eb3337c0561..2f94f7717e5 100644
--- a/autotest/test_gwf_libmf6_riv02.py
+++ b/autotest/test_gwf_libmf6_riv02.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the API, which is used to set hcof and rhs in the API package;
results are compared to the river package in the non-API simulation.
"""
@@ -8,11 +7,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_riv02"]
+from framework import TestFramework
+
+cases = ["libgwf_riv02"]
# temporal discretization
nper = 10
@@ -132,10 +131,10 @@ def get_model(ws, name, riv_spd, api=False):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
# create river data
rd = [
@@ -149,7 +148,7 @@ def build_model(idx, dir):
sim = get_model(ws, name, riv_spd={0: rd, 5: rd2})
# build comparison model with zeroed values
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
rd_api = [[(0, 0, icol), 999.0, 999.0, 0.0] for icol in range(1, ncol - 1)]
mc = get_model(ws, name, riv_spd={0: rd_api}, api=True)
@@ -168,7 +167,7 @@ def api_riv_pak(stage, h, hcof, rhs):
def api_func(exe, idx, model_ws=None):
- name = ex[idx].upper()
+ name = cases[idx].upper()
if model_ws is None:
model_ws = "."
@@ -216,7 +215,6 @@ def api_func(exe, idx, model_ws=None):
# model time loop
idx = 0
while current_time < end_time:
-
# get dt
dt = mf6.get_time_step()
@@ -273,16 +271,13 @@ def api_func(exe, idx, model_ws=None):
return True, open(output_file_path).readlines()
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
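
test_gwf_libmf6_riv02 supplies hcof and rhs to an API package so the river boundary can be formulated outside the engine; the body of api_riv_pak is not shown in this hunk. For orientation, a sketch of the conventional river-boundary linearization such a callback would compute (this is the standard RIV formulation, not necessarily the exact code in the test):

    def riv_terms(stage, cond, rbot, head):
        # Contribution of a river boundary to the cell's row of A*h = b:
        # hcof is added to the diagonal, rhs to the right-hand side.
        if head > rbot:
            hcof = -cond
            rhs = -cond * stage
        else:
            hcof = 0.0
            rhs = -cond * (stage - rbot)
        return hcof, rhs
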
diff --git a/autotest/test_gwf_libmf6_sto01.py b/autotest/test_gwf_libmf6_sto01.py
index 4cbe8caeb68..24283ffa3ee 100644
--- a/autotest/test_gwf_libmf6_sto01.py
+++ b/autotest/test_gwf_libmf6_sto01.py
@@ -1,5 +1,4 @@
"""
-MODFLOW 6 Autotest
Test the BMI set_value function, which is used to update
the Sy=0 value with the same Sy used to calculate SC2 in
the non-BMI simulation.
@@ -10,11 +9,11 @@
import flopy
import numpy as np
import pytest
-from framework import TestFramework
from modflowapi import ModflowApi
-from simulation import TestSimulation
-ex = ["libgwf_sto01"]
+from framework import TestFramework
+
+cases = ["libgwf_sto01"]
# average recharge rate
avg_rch = 0.001
@@ -138,21 +137,21 @@ def get_model(ws, name, sy):
return sim
-def build_model(idx, dir):
+def build_models(idx, test):
# build MODFLOW 6 files
- ws = dir
- name = ex[idx]
+ ws = test.workspace
+ name = cases[idx]
sim = get_model(ws, name, sy=sy_val)
# build comparison model
- ws = os.path.join(dir, "libmf6")
+ ws = os.path.join(test.workspace, "libmf6")
mc = get_model(ws, name, sy=0.0)
return sim, mc
def api_func(exe, idx, model_ws=None):
- name = ex[idx].upper()
+ name = cases[idx].upper()
if model_ws is None:
model_ws = "."
@@ -185,7 +184,6 @@ def api_func(exe, idx, model_ws=None):
# model time loop
idx = 0
while current_time < end_time:
-
# run the time step
try:
mf6.update()
@@ -209,16 +207,13 @@ def api_func(exe, idx, model_ws=None):
@pytest.mark.slow
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- test = TestFramework()
- test.build(build_model, idx, str(function_tmpdir))
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, idxsim=idx, api_func=api_func
- ),
- str(function_tmpdir),
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ api_func=lambda exe, ws: api_func(exe, idx, ws),
)
+ test.run()
diff --git a/autotest/test_gwf_maw01.py b/autotest/test_gwf_maw01.py
new file mode 100644
index 00000000000..d94a41d37bf
--- /dev/null
+++ b/autotest/test_gwf_maw01.py
@@ -0,0 +1,189 @@
+import os
+from types import SimpleNamespace
+
+import flopy
+import numpy as np
+import pytest
+
+cases = ["maw01", "maw01nwt", "maw01nwtur"]
+budtol = 1e-2
+bud_lst = ["GWF_IN", "GWF_OUT", "RATE_IN", "RATE_OUT"]
+krylov = ["CG", "BICGSTAB", "BICGSTAB"]
+newton = [None, "NEWTON", "NEWTON UNDER_RELAXATION"]
+nlay = 1
+nrow = 1
+ncol = 3
+nper = 3
+delr = 300
+delc = 300
+perlen = 3 * [1]
+nstp = 3 * [1]
+tsmult = 3 * [1]
+well = SimpleNamespace(
+ observations={"maw_obs.csv": [("mh1", "head", 1)]},
+ packagedata=[[0, 0.1, 50.0, 100.0, "THIEM", 1]],
+ connectiondata=[[0, 0, (0, 0, 1), 100.0, 50.0, 1.0, 0.1]],
+ perioddata=[[0, "rate", 0.0]],
+)
+strt = 100
+hk = 1
+nouter = 100
+ninner = 300
+hclose = 1e-9
+rclose = 1e-3
+relaxation_factor = 1
+compare = False
+
+
+def build_model(idx, ws, mf6):
+ name = cases[idx]
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name,
+ version="mf6",
+ exe_name=mf6,
+ sim_ws=ws,
+ )
+
+ # create tdis package
+ tdis_rc = [(perlen[i], nstp[i], tsmult[i]) for i in range(nper)]
+ tdis = flopy.mf6.ModflowTdis(
+ sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
+ )
+
+ # create gwf model
+ gwf = flopy.mf6.MFModel(
+ sim,
+ model_type="gwf6",
+ modelname=name,
+ model_nam_file=f"{name}.nam",
+ )
+ gwf.name_file.newtonoptions = newton[idx]
+
+ # create iterative model solution and register the gwf model with it
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ print_option="SUMMARY",
+ outer_dvclose=hclose,
+ outer_maximum=nouter,
+ under_relaxation="NONE",
+ inner_maximum=ninner,
+ inner_dvclose=hclose,
+ rcloserecord=rclose,
+ linear_acceleration=krylov[idx],
+ scaling_method="NONE",
+ reordering_method="NONE",
+ relaxation_factor=relaxation_factor,
+ )
+ sim.register_ims_package(ims, [gwf.name])
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ top=100.0,
+ botm=0.0,
+ idomain=1,
+ filename=f"{name}.dis",
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename=f"{name}.ic")
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_flows=True,
+ icelltype=1,
+ k=hk,
+ k33=hk,
+ filename=f"{name}.npf",
+ )
+ # storage
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ save_flows=True,
+ iconvert=1,
+ ss=0.0,
+ sy=0.1,
+ steady_state={0: True},
+ # transient={1: False},
+ filename=f"{name}.sto",
+ )
+
+ # chd files
+ chdlist0 = []
+ chdlist0.append([(0, 0, 0), 100.0])
+ chdlist0.append([(0, 0, 2), 100.0])
+
+ chdlist1 = []
+ chdlist1.append([(0, 0, 0), 25.0])
+ chdlist1.append([(0, 0, 2), 25.0])
+
+ chdspdict = {0: chdlist0, 1: chdlist1, 2: chdlist0}
+ chd = flopy.mf6.ModflowGwfchd(
+ gwf,
+ stress_period_data=chdspdict,
+ save_flows=False,
+ filename=f"{name}.chd",
+ )
+
+ # wel files
+ # wel = flopy.mf6.ModflowGwfwel(gwf, print_input=True, print_flows=True,
+ # maxbound=len(ws),
+ # periodrecarray=wd6,
+ # save_flows=False)
+ # MAW
+ maw = flopy.mf6.ModflowGwfmaw(
+ gwf,
+ filename=f"{name}.maw",
+ print_input=True,
+ print_head=True,
+ print_flows=True,
+ save_flows=True,
+ observations=well.observations,
+ packagedata=well.packagedata,
+ connectiondata=well.connectiondata,
+ perioddata=well.perioddata,
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{name}.cbc",
+ head_filerecord=f"{name}.hds",
+ headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
+ saverecord=[("HEAD", "ALL")],
+ printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ filename=f"{name}.oc",
+ )
+
+ return sim, None
+
+
+def check_output(workspace):
+ # MODFLOW 6 maw results
+ fpth = os.path.join(workspace, "maw_obs.csv")
+ tc = np.genfromtxt(fpth, names=True, delimiter=",")
+
+ # create known results array
+ tc0 = np.array([100.0, 25.0, 100.0])
+
+ # calculate maximum absolute error
+ diff = tc["MH1"] - tc0
+ diffmax = np.abs(diff).max()
+ dtol = 1e-9
+ msg = f"maximum absolute maw head difference {diffmax}"
+ assert diffmax < dtol, msg + f" exceeds tolerance {dtol}"
+ print(msg)
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ ws = str(function_tmpdir)
+ sim, _ = build_model(idx, ws, targets["mf6"])
+ sim.write_simulation()
+ sim.run_simulation()
+ check_output(ws)
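
The new MAW tests above write and run the simulation directly with flopy instead of going through TestFramework. A hedged sketch of exercising one case outside pytest, reusing the build_model and check_output functions defined in the new file; the workspace and the location of the mf6 executable are assumptions:

    import tempfile

    ws = tempfile.mkdtemp()
    # idx 0 -> "maw01": CG acceleration, no Newton option
    sim, _ = build_model(0, ws, "mf6")
    sim.write_simulation()
    success, _ = sim.run_simulation()
    assert success
    check_output(ws)
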
diff --git a/autotest/test_gwf_maw02.py b/autotest/test_gwf_maw02.py
new file mode 100644
index 00000000000..bd9f42704f3
--- /dev/null
+++ b/autotest/test_gwf_maw02.py
@@ -0,0 +1,289 @@
+import os
+from types import SimpleNamespace
+
+import flopy
+import numpy as np
+import pytest
+
+cases = ["maw02"]
+budtol = 1e-2
+bud_lst = ["GWF_IN", "GWF_OUT", "RATE_IN", "RATE_OUT"]
+krylov = "CG"
+nlay = 1
+nrow = 1
+ncol = 3
+nper = 5
+delr = 300
+delc = 300
+perlen = 5 * [1]
+nstp = 5 * [1]
+tsmult = 5 * [1]
+well = SimpleNamespace(
+ observations={"maw_obs.csv": [("mh1", "head", 1)]},
+ packagedata=[
+ [0, 0.1, 0.0, 100.0, "THIEM", 1],
+ [1, 0.1, 0.0, 100.0, "THIEM", 1],
+ ],
+ connectiondata=[
+ [0, 0, (0, 0, 1), 100.0, 0.0, 1.0, 0.1],
+ [1, 0, (0, 0, 1), 100.0, 0.0, 1.0, 0.1],
+ ],
+ perioddata={
+ 0: [
+ [0, "rate", -20.0],
+ [0, "status", "inactive"],
+ [0, "rate_scaling", 1.0, 15.0],
+ [1, "rate", -30.0],
+ [1, "status", "inactive"],
+ [1, "rate_scaling", 5.0, 15.0],
+ ],
+ 1: [
+ [0, "rate", -110.0],
+ [0, "status", "active"],
+ [1, "rate", -130.0],
+ [1, "status", "active"],
+ ],
+ 3: [[0, "status", "inactive"]],
+ 4: [[0, "status", "active"]],
+ },
+)
+strt = 100
+hk = 1
+nouter = 100
+ninner = 300
+hclose = 1e-9
+rclose = 1e-3
+relaxation_factor = 1
+compare = False
+
+
+def build_model(idx, ws, mf6):
+ name = cases[idx]
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name, version="mf6", exe_name=mf6, sim_ws=ws
+ )
+
+ # create tdis package
+ tdis_rc = [(perlen[i], nstp[i], tsmult[i]) for i in range(nper)]
+ tdis = flopy.mf6.ModflowTdis(
+ sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
+ )
+
+ # create gwf model
+ gwf = flopy.mf6.MFModel(
+ sim,
+ model_type="gwf6",
+ modelname=name,
+ model_nam_file=f"{name}.nam",
+ )
+
+ # create iterative model solution and register the gwf model with it
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ print_option="SUMMARY",
+ outer_dvclose=hclose,
+ outer_maximum=nouter,
+ under_relaxation="NONE",
+ inner_maximum=ninner,
+ inner_dvclose=hclose,
+ rcloserecord=rclose,
+ linear_acceleration=krylov,
+ scaling_method="NONE",
+ reordering_method="NONE",
+ relaxation_factor=relaxation_factor,
+ )
+ sim.register_ims_package(ims, [gwf.name])
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ top=100.0,
+ botm=0.0,
+ idomain=1,
+ filename=f"{name}.dis",
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename=f"{name}.ic")
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_flows=True,
+ icelltype=1,
+ k=hk,
+ k33=hk,
+ filename=f"{name}.npf",
+ )
+ # storage
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ save_flows=True,
+ iconvert=1,
+ ss=0.0,
+ sy=0.1,
+ steady_state={0: True},
+ # transient={1: False},
+ filename=f"{name}.sto",
+ )
+
+ # chd files
+ chdlist0 = []
+ chdlist0.append([(0, 0, 0), 100.0])
+ chdlist0.append([(0, 0, 2), 100.0])
+
+ chdlist1 = []
+ chdlist1.append([(0, 0, 0), 25.0])
+ chdlist1.append([(0, 0, 2), 25.0])
+
+ chdspdict = {0: chdlist0, 1: chdlist1, 2: chdlist0}
+ chd = flopy.mf6.ModflowGwfchd(
+ gwf,
+ stress_period_data=chdspdict,
+ save_flows=False,
+ filename=f"{name}.chd",
+ )
+
+ # MAW
+ maw = flopy.mf6.ModflowGwfmaw(
+ gwf,
+ filename=f"{name}.maw",
+ budget_filerecord=f"{name}.maw.cbc",
+ print_input=True,
+ print_head=True,
+ print_flows=True,
+ save_flows=True,
+ observations=well.observations,
+ packagedata=well.packagedata,
+ connectiondata=well.connectiondata,
+ perioddata=well.perioddata,
+ pname="MAW-1",
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{name}.cbc",
+ head_filerecord=f"{name}.hds",
+ headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ filename=f"{name}.oc",
+ )
+
+ return sim, None
+
+
+def eval_results(name, workspace):
+ shape3d = (nlay, nrow, ncol)
+ size3d = nlay * nrow * ncol
+
+ # get results from listing file
+ fpth = os.path.join(workspace, f"{os.path.basename(name)}.lst")
+ budl = flopy.utils.Mf6ListBudget(
+ fpth, budgetkey="MAW-1 BUDGET FOR ENTIRE MODEL AT END OF TIME STEP"
+ )
+ names = list(bud_lst)
+ d0 = budl.get_budget(names=names)[0]
+ dtype = d0.dtype
+ nbud = d0.shape[0]
+
+ # get results from cbc file
+ cbc_bud = ["GWF", "RATE"]
+ d = np.recarray(nbud, dtype=dtype)
+ for key in bud_lst:
+ d[key] = 0.0
+ fpth = os.path.join(workspace, f"{os.path.basename(name)}.maw.cbc")
+ cobj = flopy.utils.CellBudgetFile(fpth, precision="double")
+ kk = cobj.get_kstpkper()
+ times = cobj.get_times()
+ cbc_vals = []
+ for idx, (k, t) in enumerate(zip(kk, times)):
+ for text in cbc_bud:
+ qin = 0.0
+ qout = 0.0
+ v = cobj.get_data(kstpkper=k, text=text)[0]
+ if isinstance(v, np.recarray):
+ vt = np.zeros(size3d, dtype=float)
+ wq = []
+ for jdx, node in enumerate(v["node"]):
+ vt[node - 1] += v["q"][jdx]
+ wq.append(v["q"][jdx])
+ v = vt.reshape(shape3d)
+ if text == cbc_bud[-1]:
+ cbc_vals.append(wq)
+ for kk in range(v.shape[0]):
+ for ii in range(v.shape[1]):
+ for jj in range(v.shape[2]):
+ vv = v[kk, ii, jj]
+ if vv < 0.0:
+ qout -= vv
+ else:
+ qin += vv
+ d["totim"][idx] = t
+ d["time_step"][idx] = k[0]
+ d["stress_period"] = k[1]
+ key = f"{text}_IN"
+ d[key][idx] = qin
+ key = f"{text}_OUT"
+ d[key][idx] = qout
+
+ maw_vals = [
+ [0.000, 0.000],
+ [-106.11303563809453, -96.22598985147631],
+ [-110.000, -130.000],
+ [0.0, -130.000],
+ [-110.000, -130.000],
+ ]
+
+ # evaluate if well rates in cbc file are equal to expected values
+ diffv = []
+ for ovs, svs in zip(maw_vals, cbc_vals):
+ for ov, sv in zip(ovs, svs):
+ diffv.append(ov - sv)
+ diffv = np.abs(np.array(diffv)).max()
+ msg = f"\nmaximum absolute maw rate difference ({diffv})\n"
+
+ # calculate difference between water budget items in the lst and cbc files
+ diff = np.zeros((nbud, len(bud_lst)), dtype=float)
+ for idx, key in enumerate(bud_lst):
+ diff[:, idx] = d0[key] - d[key]
+ diffmax = np.abs(diff).max()
+ msg += f"maximum absolute total-budget difference ({diffmax}) "
+
+ # write summary
+ fpth = os.path.join(workspace, f"{os.path.basename(name)}.bud.cmp.out")
+ f = open(fpth, "w")
+ for i in range(diff.shape[0]):
+ if i == 0:
+ line = f"{'TIME':>10s}"
+ for idx, key in enumerate(bud_lst):
+ line += f"{key + '_LST':>25s}"
+ line += f"{key + '_CBC':>25s}"
+ line += f"{key + '_DIF':>25s}"
+ f.write(line + "\n")
+ line = f"{d['totim'][i]:10g}"
+ for idx, key in enumerate(bud_lst):
+ line += f"{d0[key][i]:25g}"
+ line += f"{d[key][i]:25g}"
+ line += f"{diff[i, idx]:25g}"
+ f.write(line + "\n")
+ f.close()
+
+ assert diffmax < budtol, (
+ msg + f"diffmax {diffmax} exceeds tolerance {budtol}"
+ )
+ assert diffv < budtol, msg + f"diffv {diffv} exceeds tolerance {budtol}"
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ ws = str(function_tmpdir)
+ sim, _ = build_model(idx, ws, targets["mf6"])
+ sim.write_simulation()
+ sim.run_simulation()
+ eval_results(name, ws)
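
eval_results above reads the MAW package budget out of the listing file by passing a custom budgetkey, then cross-checks it against the binary budget file. A minimal sketch of the listing-file side; the listing file name is illustrative, and the key string must match the budget header printed in the .lst file:

    import flopy

    names = ["GWF_IN", "GWF_OUT", "RATE_IN", "RATE_OUT"]
    budl = flopy.utils.Mf6ListBudget(
        "maw02.lst",   # listing file name is illustrative
        budgetkey="MAW-1 BUDGET FOR ENTIRE MODEL AT END OF TIME STEP",
    )
    incremental = budl.get_budget(names=names)[0]   # [0] -> incremental budget
    print(incremental["GWF_IN"])
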
diff --git a/autotest/test_gwf_maw03.py b/autotest/test_gwf_maw03.py
new file mode 100644
index 00000000000..3f37660960e
--- /dev/null
+++ b/autotest/test_gwf_maw03.py
@@ -0,0 +1,214 @@
+import os
+from types import SimpleNamespace
+
+import flopy
+import numpy as np
+import pytest
+
+cases = ["maw03a", "maw03b", "maw03c"]
+budtol = 1e-2
+bud_lst = ["GWF_IN", "GWF_OUT", "RATE_IN", "RATE_OUT"]
+
+
+def well3(name):
+ perioddata = {
+ "maw03a": [
+ (0, "rate", 2000.0),
+ ],
+ "maw03b": [(0, "rate", 2000.0), (0, "head_limit", 0.4)],
+ "maw03c": [(0, "rate", 2000.0), (0, "rate_scaling", 0.0, 1.0)],
+ }
+ wellbottom = -1000
+ return SimpleNamespace(
+ observations={
+ f"{name}.maw.obs.csv": [
+ ("m1head", "head", (0,)),
+ ("m1rate", "rate", (0,)),
+ ] # is this index one-based? Not if in a tuple
+ },
+ packagedata=[[0, 0.15, wellbottom, 0.0, "THIEM", 1]],
+ connectiondata=[[0, 0, (0, 50, 50), 0.0, wellbottom, 0.0, 0.0]],
+ perioddata=perioddata[name],
+ )
+
+
+krylov = "CG"
+nlay = 1
+nrow = 101
+ncol = 101
+nper = 1
+delr = 142
+delc = 142
+perlen = [1000]
+nstp = [50]
+tsmult = [1.2]
+strt = 0
+hk = 10
+nouter = 100
+ninner = 100
+hclose = 1e-6
+rclose = 1e-6
+relaxation_factor = 1
+compare = False
+
+
+def build_model(idx, ws, mf6):
+ top = 0.0
+ botm = [-1000.0]
+
+ tdis_rc = []
+ for i in range(nper):
+ tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
+
+ name = cases[idx]
+ sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name=mf6)
+
+ # create tdis package
+ tdis = flopy.mf6.ModflowTdis(
+ sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
+ )
+
+ # create gwf model
+ gwf = flopy.mf6.MFModel(
+ sim,
+ model_type="gwf6",
+ modelname=name,
+ model_nam_file=f"{name}.nam",
+ )
+
+ # create iterative model solution and register the gwf model with it
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ print_option="SUMMARY",
+ outer_dvclose=hclose,
+ outer_maximum=nouter,
+ under_relaxation="NONE",
+ inner_maximum=ninner,
+ inner_dvclose=hclose,
+ rcloserecord=rclose,
+ linear_acceleration=krylov,
+ scaling_method="NONE",
+ reordering_method="NONE",
+ relaxation_factor=relaxation_factor,
+ )
+ sim.register_ims_package(ims, [gwf.name])
+
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delc,
+ top=top,
+ botm=botm,
+ idomain=1,
+ filename=f"{name}.dis",
+ )
+
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename=f"{name}.ic")
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf,
+ save_flows=True,
+ icelltype=1,
+ k=hk,
+ k33=hk,
+ filename=f"{name}.npf",
+ )
+
+ # storage
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ save_flows=True,
+ iconvert=0,
+ ss=1.0e-5,
+ sy=0.1,
+ steady_state={0: False},
+ transient={0: True},
+ filename=f"{name}.sto",
+ )
+
+ # MAW
+ well = well3(name)
+ maw = flopy.mf6.ModflowGwfmaw(
+ gwf,
+ filename=f"{name}.maw",
+ print_input=True,
+ print_head=True,
+ print_flows=True,
+ save_flows=True,
+ observations=well.observations,
+ packagedata=well.packagedata,
+ connectiondata=well.connectiondata,
+ perioddata=well.perioddata,
+ )
+
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{name}.cbc",
+ head_filerecord=f"{name}.hds",
+ headprintrecord=[
+ ("COLUMNS", ncol, "WIDTH", 15, "DIGITS", 6, "GENERAL")
+ ],
+ saverecord=[("HEAD", "ALL")],
+ printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ filename=f"{name}.oc",
+ )
+
+ # head observations
+ obs_data0 = [("head_well_cell", "HEAD", (0, 0, 0))]
+ obs_recarray = {f"{name}.obs.csv": obs_data0}
+ obs = flopy.mf6.ModflowUtlobs(
+ gwf,
+ pname="head_obs",
+ filename=f"{name}.obs",
+ digits=15,
+ print_input=True,
+ continuous=obs_recarray,
+ )
+
+ return sim
+
+
+def eval_results(name, workspace):
+ # MODFLOW 6 maw results
+ test_name = name
+ fpth = os.path.join(workspace, f"{test_name}.maw.obs.csv")
+ tc = np.genfromtxt(fpth, names=True, delimiter=",")
+
+ if test_name.endswith("a"):
+ # M1RATE should be 2000.
+ msg = "The injection rate should be 2000. for all times"
+ assert tc["M1RATE"].min() == tc["M1RATE"].max() == 2000, msg
+
+ elif test_name.endswith("b"):
+ # M1RATE should have a minimum value less than 200 and
+ # M1HEAD should not exceed 0.400001
+ msg = (
+ "Injection rate should fall below 200 and the head should not"
+ "exceed 0.4"
+ )
+ assert tc["M1RATE"].min() < 200.0, msg
+ assert tc["M1HEAD"].max() < 0.400001, msg
+
+ elif test_name.endswith("c"):
+ # M1RATE should have a minimum value less than 800
+ # M1HEAD should not exceed 1.0.
+ msg = (
+ "Min injection rate should be less than 800 and well "
+ "head should not exceed 1.0"
+ )
+ assert tc["M1RATE"].min() < 800.0 and tc["M1HEAD"].max() < 1.0, msg
+
+
+@pytest.mark.parametrize("idx, name", enumerate(cases))
+def test_mf6model(idx, name, function_tmpdir, targets):
+ ws = str(function_tmpdir)
+ sim = build_model(idx, ws, targets["mf6"])
+ sim.write_simulation()
+ sim.run_simulation()
+ eval_results(name, ws)
diff --git a/autotest/test_gwf_maw04.py b/autotest/test_gwf_maw04.py
index e77656f0c68..dbbe44cc4c4 100644
--- a/autotest/test_gwf_maw04.py
+++ b/autotest/test_gwf_maw04.py
@@ -3,8 +3,7 @@
import flopy
import numpy as np
-from modflow_devtools.case import Case
-from pytest_cases import parametrize
+import pytest
# temporal discretization
nper = 2
@@ -101,198 +100,172 @@ def well4(label):
)
-case = Case(
- name="maw_iss305",
- nlay=nlay,
- nrow=nrow,
- ncol=ncol,
- nper=nper,
- delr=delr,
- perlen=perlen,
- nstp=nstp,
- tsmult=tsmult,
- steady=steady,
- strt=0,
- hk=10,
- nouter=100,
- ninner=100,
- hclose=1e-9,
- rclose=1e-6,
- relax=1,
- top=top,
- botm=botm,
- confined=confined,
- ss=ss,
- chd_spd=chd_spd,
- chd5_spd=chd5_spd,
- nhalf=nhalf,
- radius=radius,
- wellq=wellq,
- compare=False,
-)
-cases = [case.copy_update(name=case.name + "a", well=well4("a"),)] + [
- case.copy_update(name=case.name + label, well=well4(label), xfail=True)
- for label in [
- "b",
- # "c", # todo: this one passes when it should fail
- "d",
- "e",
- "f",
- ]
-]
+# npf data
+strt = 0
+hk = 10
+# solver
+nouter = 100
+ninner = 100
+hclose = 1e-9
+rclose = 1e-6
+relax = 1
-class GwfMaw04Cases:
- @parametrize(data=cases, ids=[c.name for c in cases])
- def case_4(self, function_tmpdir, targets, data):
- name = data.name
- ws = str(function_tmpdir)
+# subproblems
+subprobs = ["a", "b", "c", "d", "e", "f"]
+ex = [f"maw_iss305{sp}" for sp in subprobs]
+wells = [well4(sp) for sp in subprobs]
+xfail = [False, True, True, True, True, True]
- # build MODFLOW 6 files
- sim = flopy.mf6.MFSimulation(
- sim_name=name, version="mf6", exe_name=targets["mf6"], sim_ws=ws
- )
- # create tdis package
- tdis_rc = []
- for idx in range(data.nper):
- tdis_rc.append(
- (data.perlen[idx], data.nstp[idx], data.tsmult[idx])
- )
- tdis = flopy.mf6.ModflowTdis(
- sim, time_units="DAYS", nper=data.nper, perioddata=tdis_rc
- )
- # create iterative model solution
- ims = flopy.mf6.ModflowIms(
- sim,
- inner_dvclose=data.hclose,
- rcloserecord=data.rclose,
- outer_dvclose=data.hclose,
- )
+def build_model(idx, ws, mf6):
+ name = ex[idx]
+ well = wells[idx]
+ # build MODFLOW 6 files
+ sim = flopy.mf6.MFSimulation(
+ sim_name=name, version="mf6", exe_name=mf6, sim_ws=ws
+ )
+ # create tdis package
+ tdis_rc = []
+ for kper in range(nper):
+ tdis_rc.append((perlen[kper], nstp[kper], tsmult[kper]))
+ tdis = flopy.mf6.ModflowTdis(
+ sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
+ )
- # create gwf model
- gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
+ # create iterative model solution
+ ims = flopy.mf6.ModflowIms(
+ sim,
+ inner_dvclose=hclose,
+ rcloserecord=rclose,
+ outer_dvclose=hclose,
+ )
- # discretization
- dis = flopy.mf6.ModflowGwfdis(
- gwf,
- nlay=data.nlay,
- nrow=data.nrow,
- ncol=data.ncol,
- delr=data.delr,
- delc=data.delr,
- top=data.top,
- botm=data.botm,
- )
- # initial conditions
- ic = flopy.mf6.ModflowGwfic(gwf, strt=data.strt)
+ # create gwf model
+ gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
- # node property flow
- npf = flopy.mf6.ModflowGwfnpf(
- gwf, save_flows=False, icelltype=data.confined, k=data.hk
- )
- # storage
- sto = flopy.mf6.ModflowGwfsto(
- gwf,
- save_flows=False,
- iconvert=data.confined,
- ss=data.ss,
- steady_state={0: True},
- transient={1: True},
+ # discretization
+ dis = flopy.mf6.ModflowGwfdis(
+ gwf,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ delr=delr,
+ delc=delr,
+ top=top,
+ botm=botm,
+ )
+ # initial conditions
+ ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
+
+ # node property flow
+ npf = flopy.mf6.ModflowGwfnpf(
+ gwf, save_flows=False, icelltype=confined, k=hk
+ )
+ # storage
+ sto = flopy.mf6.ModflowGwfsto(
+ gwf,
+ save_flows=False,
+ iconvert=confined,
+ ss=ss,
+ steady_state={0: True},
+ transient={1: True},
+ )
+ # constant head
+ chd = flopy.mf6.ModflowGwfchd(
+ gwf, stress_period_data=chd_spd, save_flows=False
+ )
+ # multi-aquifer well
+ maw = flopy.mf6.ModflowGwfmaw(
+ gwf,
+ print_input=well.print_input,
+ no_well_storage=well.no_well_storage,
+ packagedata=well.packagedata,
+ connectiondata=well.connectiondata,
+ perioddata=well.perioddata,
+ )
+ # output control
+ oc = flopy.mf6.ModflowGwfoc(
+ gwf,
+ budget_filerecord=f"{name}.cbc",
+ head_filerecord=f"{name}.hds",
+ saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+ )
+ # build MODFLOW-2005 files
+ if xfail[idx]:
+ mc = None
+ else:
+ cmppth = "mf2005"
+ ws = os.path.join(ws, cmppth)
+ mc = flopy.modflow.Modflow(name, model_ws=ws, version=cmppth)
+ dis = flopy.modflow.ModflowDis(
+ mc,
+ nlay=nlay,
+ nrow=nrow,
+ ncol=ncol,
+ nper=nper,
+ perlen=perlen,
+ nstp=nstp,
+ tsmult=tsmult,
+ steady=steady,
+ delr=delr,
+ delc=delr,
+ top=top,
+ botm=botm,
)
- # constant head
- chd = flopy.mf6.ModflowGwfchd(
- gwf, stress_period_data=data.chd_spd, save_flows=False
+ bas = flopy.modflow.ModflowBas(mc, strt=strt)
+ lpf = flopy.modflow.ModflowLpf(
+ mc,
+ laytyp=confined,
+ hk=hk,
+ vka=hk,
+ ss=ss,
+ sy=0,
)
- # multi-aquifer well
- maw = flopy.mf6.ModflowGwfmaw(
- gwf,
- print_input=data.well.print_input,
- no_well_storage=data.well.no_well_storage,
- packagedata=data.well.packagedata,
- connectiondata=data.well.connectiondata,
- perioddata=data.well.perioddata,
+ chd = flopy.modflow.ModflowChd(mc, stress_period_data=chd5_spd)
+ # mnw2
+ # empty mnw2 file to create recarrays
+ mnw2 = flopy.modflow.ModflowMnw2(mc)
+ node_data = mnw2.get_empty_node_data(2)
+ node_data["ztop"] = np.array([top, botm[0]])
+ node_data["zbotm"] = np.array([botm[0], botm[1]])
+ node_data["i"] = np.array([nhalf, nhalf])
+ node_data["j"] = np.array([nhalf, nhalf])
+ node_data["wellid"] = np.array(["well1", "well1"])
+ node_data["losstype"] = np.array(["skin", "skin"])
+ node_data["rw"] = np.array([radius, radius])
+ node_data["rskin"] = np.array([sradius[name[-1]], sradius[name[-1]]])
+ hks = hk * skin_mult[name[-1]]
+ node_data["kskin"] = np.array([hks, hks])
+ dtype = [("wellid", np.unicode_, 20), ("qdes", "
+ #
mawpackagedata = [
[0, mawradius, mawbottom, mstrt, mawcondeqn, mawngwfnodes]
]
- #
+ #
mawconnectiondata = [
[0, icon, (icon, 0, 0), top, mawbottom, -999.0, -999.0]
for icon in range(nlay)
]
- #
+ #
mawperioddata = [[0, "STATUS", "ACTIVE"]]
maw = flopy.mf6.ModflowGwfmaw(
gwf,
@@ -177,26 +179,24 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# calculate volume of water and make sure it is conserved
- name = ex[sim.idxsim]
+ name = cases[idx]
gwfname = "gwf_" + name
fname = gwfname + ".maw.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="HEAD")
stage = bobj.get_alldata().flatten()
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_alldata()
# calculate initial volume of water in well and aquifer
- v0maw = mawstrt[sim.idxsim] * np.pi * 0.1**2
+ v0maw = mawstrt[idx] * np.pi * 0.1**2
v0gwf = 4 * 7 * 0.3
v0 = v0maw + v0gwf
top = [4.0, 3.0, 2.0, 1.0]
@@ -214,7 +214,6 @@ def eval_results(sim):
# calculate current volume of water in well and aquifer and compare with
# initial volume
for kstp, mawstage in enumerate(stage):
-
vgwf = 0
for k in range(nlay):
for j in range(ncol):
@@ -241,17 +240,14 @@ def eval_results(sim):
)
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=idx
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare="mf6_regression",
)
+ test.run()
diff --git a/autotest/test_gwf_maw06.py b/autotest/test_gwf_maw06.py
index 7a4748b059c..d16b04135b2 100644
--- a/autotest/test_gwf_maw06.py
+++ b/autotest/test_gwf_maw06.py
@@ -1,16 +1,18 @@
-# Test maw package ability to equalize and the flow correction.
-# maw_06a - well start at .25, aquifer starts at 2
-# maw_06b - well starts at 2, aquifer starts at .25
+"""
+Test the maw package's ability to equalize, and the flow correction.
+maw_06a - well starts at 0.25, aquifer starts at 2
+maw_06b - well starts at 2, aquifer starts at 0.25
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["maw_06a", "maw_06b"]
+cases = ["maw_06a", "maw_06b"]
nlay = 2
nrow = 1
@@ -47,7 +49,7 @@
mawcond = Kh * delc * dz / (0.5 * delr)
-def build_model(idx, dir):
+def build_models(idx, test):
nper = 1
perlen = [10.0]
nstp = [100]
@@ -60,10 +62,10 @@ def build_model(idx, dir):
nouter, ninner = 700, 200
hclose, rclose, relax = 1e-9, 1e-9, 1.0
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name,
version="mf6",
@@ -131,14 +133,14 @@ def build_model(idx, dir):
mstrt = mawstrt[idx]
mawcondeqn = "SPECIFIED"
mawngwfnodes = nlay
- #
+ #
mawpackagedata = [[0, mawradius, bot, mstrt, mawcondeqn, mawngwfnodes]]
- #
+ #
mawconnectiondata = [
[0, icon, (icon, 0, 0), top, bot, mawcond, -999]
for icon in range(nlay)
]
- #
+ #
mawperioddata = [[0, "STATUS", "ACTIVE"]]
mbin = f"{gwfname}.maw.bin"
mbud = f"{gwfname}.maw.bud"
@@ -197,27 +199,25 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# calculate volume of water and make sure it is conserved
- name = ex[sim.idxsim]
+ name = cases[idx]
gwfname = "gwf_" + name
fname = gwfname + ".maw.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="HEAD")
stage = bobj.get_alldata().flatten()
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_alldata()
# calculate initial volume of water in well and aquifer
- v0maw = mawstrt[sim.idxsim] * mawarea
- v0gwf = (gwfstrt[sim.idxsim] - bot) * sy * gwfarea
+ v0maw = mawstrt[idx] * mawarea
+ v0gwf = (gwfstrt[idx] - bot) * sy * gwfarea
v0 = v0maw + v0gwf
print(
@@ -257,13 +257,13 @@ def eval_results(sim):
# compare the maw-gwf flows with the gwf-maw flows
fname = gwfname + ".maw.bud"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
mbud = flopy.utils.CellBudgetFile(fname, precision="double")
maw_gwf = mbud.get_data(text="GWF")
fname = gwfname + ".cbc"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
gbud = flopy.utils.CellBudgetFile(fname, precision="double")
gwf_maw = gbud.get_data(text="MAW")
@@ -279,17 +279,14 @@ def eval_results(sim):
assert np.allclose(qmaw, -qgwf), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=idx
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare="mf6_regression",
)
+ test.run()
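
The maw06 checks (and the maw07/maw08 checks below) compare the exchange flows written on both sides of the MAW-GWF connection. A condensed sketch of that symmetry check, with file names assumed:

    import flopy
    import numpy as np

    mbud = flopy.utils.CellBudgetFile("gwf_maw_06a.maw.bud", precision="double")
    gbud = flopy.utils.CellBudgetFile("gwf_maw_06a.cbc", precision="double")

    maw_gwf = mbud.get_data(text="GWF")   # flow recorded by the MAW package
    gwf_maw = gbud.get_data(text="MAW")   # same flow recorded by the GWF model

    # each record pair should be equal and opposite
    for vmaw, vgwf in zip(maw_gwf, gwf_maw):
        assert np.allclose(vmaw["q"], -vgwf["q"])
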
diff --git a/autotest/test_gwf_maw07.py b/autotest/test_gwf_maw07.py
index c92ff6e105b..8ddede3585e 100644
--- a/autotest/test_gwf_maw07.py
+++ b/autotest/test_gwf_maw07.py
@@ -1,16 +1,18 @@
-# Modifiy the previous test by having a first stress period where the
-# MAW well is inactive. Test ensures that gwf-maw and maw-gwf flows reported
-# in the gwf and maw budget files are zero for this first period.
+"""
+Modify the previous test by having a first stress period where the
+MAW well is inactive. The test ensures that gwf-maw and maw-gwf flows
+reported in the gwf and maw budget files are zero for this first period.
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ["maw_07a", "maw_07b"]
+cases = ["maw_07a", "maw_07b"]
nlay = 2
nrow = 1
@@ -47,7 +49,7 @@
mawcond = Kh * delc * dz / (0.5 * delr)
-def build_model(idx, dir):
+def build_models(idx, test):
nper = 2
perlen = [10.0, 10.0]
nstp = [1, 100]
@@ -60,10 +62,10 @@ def build_model(idx, dir):
nouter, ninner = 700, 200
hclose, rclose, relax = 1e-9, 1e-9, 1.0
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name,
version="mf6",
@@ -131,14 +133,14 @@ def build_model(idx, dir):
mstrt = mawstrt[idx]
mawcondeqn = "SPECIFIED"
mawngwfnodes = nlay
- #
+ #
mawpackagedata = [[0, mawradius, bot, mstrt, mawcondeqn, mawngwfnodes]]
- #
+ #
mawconnectiondata = [
[0, icon, (icon, 0, 0), top, bot, mawcond, -999]
for icon in range(nlay)
]
- #
+ #
mawperioddata = {}
mawperioddata[0] = [[0, "STATUS", "INACTIVE"]]
mawperioddata[1] = [[0, "STATUS", "ACTIVE"]]
@@ -199,27 +201,25 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def check_output(idx, test):
# calculate volume of water and make sure it is conserved
- name = ex[sim.idxsim]
+ name = cases[idx]
gwfname = "gwf_" + name
fname = gwfname + ".maw.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="HEAD")
stage = bobj.get_alldata().flatten()[1:]
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_alldata()[1:]
# calculate initial volume of water in well and aquifer
- v0maw = mawstrt[sim.idxsim] * mawarea
- v0gwf = (gwfstrt[sim.idxsim] - bot) * sy * gwfarea
+ v0maw = mawstrt[idx] * mawarea
+ v0gwf = (gwfstrt[idx] - bot) * sy * gwfarea
v0 = v0maw + v0gwf
print(
@@ -259,13 +259,13 @@ def eval_results(sim):
# compare the maw-gwf flows with the gwf-maw flows
fname = gwfname + ".maw.bud"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
mbud = flopy.utils.CellBudgetFile(fname, precision="double")
maw_gwf = mbud.get_data(text="GWF")
fname = gwfname + ".cbc"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
gbud = flopy.utils.CellBudgetFile(fname, precision="double")
gwf_maw = gbud.get_data(text="MAW")
@@ -285,17 +285,14 @@ def eval_results(sim):
assert np.allclose(qmaw, -qgwf), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=idx
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ targets=targets,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: check_output(idx, t),
+ compare="mf6_regression",
)
+ test.run()
diff --git a/autotest/test_gwf_maw08.py b/autotest/test_gwf_maw08.py
index e4f423298f9..94e7dfa36f7 100644
--- a/autotest/test_gwf_maw08.py
+++ b/autotest/test_gwf_maw08.py
@@ -1,15 +1,17 @@
-# test to evaluate Newton-Raphson solution for a single column steady-state
-# dry multi-aquifer well problem. Developed to address issue #546
+"""
+Test Newton-Raphson solution for a single column steady-state
+dry multi-aquifer well problem. Developed to address issue #546
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ("maw_08a", "maw_08b")
+cases = ("maw_08a", "maw_08b")
dis_option = ("dis", "disv")
nlay = 3
@@ -42,13 +44,13 @@
radius = 0.05
-def build_model(idx, dir):
+def build_models(idx, test):
dvclose, rclose, relax = 1e-9, 1e-9, 1.0
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name,
version="mf6",
@@ -121,7 +123,7 @@ def build_model(idx, dir):
k33=Kv,
)
- #
+ #
mawpackagedata = flopy.mf6.ModflowGwfmaw.packagedata.empty(gwf, maxbound=1)
mawpackagedata["radius"] = radius
mawpackagedata["bottom"] = maw_bot
@@ -129,7 +131,7 @@ def build_model(idx, dir):
mawpackagedata["condeqn"] = "thiem"
mawpackagedata["ngwfnodes"] = 2
- #
+ #
mawconnectiondata = flopy.mf6.ModflowGwfmaw.connectiondata.empty(
gwf, maxbound=2
)
@@ -194,14 +196,12 @@ def build_model(idx, dir):
return sim, None
-def eval_results(sim):
- print("evaluating results...")
-
+def eval_results(idx, test):
# calculate volume of water and make sure it is conserved
- name = ex[sim.idxsim]
+ name = cases[idx]
gwfname = "gwf_" + name
fname = gwfname + ".maw.bin"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="HEAD")
@@ -211,20 +211,20 @@ def eval_results(sim):
), f"simulated maw head ({well_head[0]}) does not equal 10."
fname = gwfname + ".hds"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_alldata()[1:]
# compare the maw-gwf flows with the gwf-maw flows
fname = gwfname + ".maw.bud"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
mbud = flopy.utils.CellBudgetFile(fname, precision="double")
maw_gwf = mbud.get_data(text="GWF")
fname = gwfname + ".cbc"
- fname = os.path.join(sim.simpath, fname)
+ fname = os.path.join(test.workspace, fname)
assert os.path.isfile(fname)
gbud = flopy.utils.CellBudgetFile(fname, precision="double")
gwf_maw = gbud.get_data(text="MAW")
@@ -240,17 +240,13 @@ def eval_results(sim):
assert np.allclose(qmaw, -qgwf), msg
-@pytest.mark.parametrize(
- "idx, name",
- list(enumerate(ex)),
-)
+@pytest.mark.parametrize("idx, name", enumerate(cases))
def test_mf6model(idx, name, function_tmpdir, targets):
- ws = str(function_tmpdir)
- test = TestFramework()
- test.build(build_model, idx, ws)
- test.run(
- TestSimulation(
- name=name, exe_dict=targets, exfunc=eval_results, idxsim=idx
- ),
- ws,
+ test = TestFramework(
+ name=name,
+ workspace=function_tmpdir,
+ build=lambda t: build_models(idx, t),
+ check=lambda t: eval_results(idx, t),
+ targets=targets,
)
+ test.run()
diff --git a/autotest/test_gwf_maw09.py b/autotest/test_gwf_maw09.py
index 48541261bb7..730e5cb0c8d 100644
--- a/autotest/test_gwf_maw09.py
+++ b/autotest/test_gwf_maw09.py
@@ -1,15 +1,17 @@
-# test to evaluate Newton-Raphson solution for a single column transient
-# dry multi-aquifer well problem. Developed to address issue #546
+"""
+Test Newton-Raphson solution for a single column transient
+dry multi-aquifer well problem. Developed to address issue #546
+"""
import os
import flopy
import numpy as np
import pytest
+
from framework import TestFramework
-from simulation import TestSimulation
-ex = ("maw_09a", "maw_09b", "maw_09c", "maw_09d")
+cases = ("maw_09a", "maw_09b", "maw_09c", "maw_09d")
dis_option = ("dis", "dis", "disv", "disv")
flow_correction = (None, True, None, True)
@@ -44,13 +46,13 @@
radius = np.sqrt(1.0 / np.pi)
-def build_model(idx, dir):
+def build_models(idx, test):
dvclose, rclose, relax = 1e-9, 1e-9, 1.0
- name = ex[idx]
+ name = cases[idx]
# build MODFLOW 6 files
- ws = dir
+ ws = test.workspace
sim = flopy.mf6.MFSimulation(
sim_name=name,
version="mf6",
@@ -146,7 +148,7 @@ def build_model(idx, dir):
gwf, ss=0.0, sy=1.0, transient=True, iconvert=1
)
- #
+ #