
Merge pull request #19 from ITISFoundation/add_postinstall_tests
Add tests after building the wheel
wvangeit authored Sep 5, 2024
2 parents 8374725 + d0ecec3 commit 4e149b0
Showing 17 changed files with 440 additions and 10 deletions.
18 changes: 14 additions & 4 deletions .github/workflows/build-wheels.yml
@@ -32,14 +32,19 @@ jobs:
fail-fast: true
matrix:
os: [ubuntu-latest]
python: [cp38,cp39,cp310,cp311,cp312]
python: ['3.8', '3.9', '3.10', '3.11', '3.12']
arch: [x86_64]
env:
SCCACHE_GHA_ENABLED: "on"
permissions:
contents: write
needs: tag
steps:
- name: Set cibuildwheel Python version
run: |
python_version="${{ matrix.python }}"
cibw_python="cp${python_version/./}"
echo "CIBW_PYTHON=$cibw_python" >> $GITHUB_ENV
- uses: actions/checkout@v4
if: github.ref != 'refs/heads/master'
with:
@@ -54,16 +59,21 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.11
python-version: ${{ matrix.python }}
- name: Get dakota src
run: make get-dakota-src
- uses: pypa/cibuildwheel@v2.16
env:
CIBW_BUILD: ${{ matrix.python }}*${{ matrix.arch }}
CIBW_BUILD: ${{ env.CIBW_PYTHON }}*${{ matrix.arch }}
- name: Run tests
run: |
pip install pytest
pip install ./wheelhouse/itis_dakota*.whl
make test
- name: Upload wheels
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.arch }}
name: wheels-${{ matrix.os }}-${{ env.CIBW_PYTHON }}-${{ matrix.arch }}
path: ./wheelhouse/*.whl

release:
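The new "Set cibuildwheel Python version" step bridges two naming schemes: the matrix now lists dotted interpreter versions (as actions/setup-python expects), while cibuildwheel still selects builds by cpXY tags, so the step derives CIBW_PYTHON from matrix.python with a bash substitution (${python_version/./}). A rough Python sketch of the same mapping; the cibw_tag helper is illustrative only, not part of the commit:

def cibw_tag(python_version: str) -> str:
    # "3.11" -> "cp311", the identifier cibuildwheel matches against in CIBW_BUILD
    return "cp" + python_version.replace(".", "")

assert cibw_tag("3.8") == "cp38"
assert cibw_tag("3.12") == "cp312"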
1 change: 1 addition & 0 deletions .gitignore
@@ -7,3 +7,4 @@ dakota
dakota_wheel-*
wheelhouse
itis_dakota/_version.py
/wheel
10 changes: 5 additions & 5 deletions CMakeLists.txt
@@ -10,12 +10,12 @@ set(PY_BUILD_CMAKE_LIB_DIRECTORY ${PY_BUILD_CMAKE_PACKAGE_NAME}-${PY_BUILD_CMAKE

add_subdirectory(dakota EXCLUDE_FROM_ALL)

add_custom_target(dakota_for_python ALL DEPENDS environment dakota_src ${Dakota_TPL_LIBRARIES})
add_custom_target(dakota_for_python ALL DEPENDS environment dakota_src)

#install(TARGETS dakota
# RUNTIME
# COMPONENT dakota_for_python
# DESTINATION ${PY_BUILD_CMAKE_BIN_DIRECTORY})
# install(TARGETS dakota
# RUNTIME
# COMPONENT dakota_for_python
# DESTINATION ${PY_BUILD_CMAKE_BIN_DIRECTORY})

install(TARGETS dakota_src
LIBRARY
8 changes: 7 additions & 1 deletion Makefile
@@ -4,9 +4,15 @@ all:
wheel: cache-clean clean
CIBW_BUILD=cp311*x86_64 cibuildwheel --platform linux

install:
test:
python -m pytest

install: cache-clean
pip install -v .

pipwheel: cache-clean clean
pip wheel -v . -w wheel

clean:
rm -rf dist/ wheel/ build/ *.whl wheelhouse/

3 changes: 3 additions & 0 deletions pyproject.toml
@@ -77,3 +77,6 @@ DAKOTA_PYTHON_WRAPPER = "ON"
DAKOTA_PYTHON_DIRECT_INTERFACE = "ON"
BUILD_SHARED_LIBS = "OFF"
CMAKE_POSITION_INDEPENDENT_CODE = "ON"

[tool.pytest.ini_options]
addopts = "--ignore=dakota/"
48 changes: 48 additions & 0 deletions tests/adasampling/adasampling.in
@@ -0,0 +1,48 @@
environment
tabular_data
tabular_data_file
'dakota_tabular.dat'
top_method_pointer = 'ADAPTIVE_SAMPLING'

method
id_method = 'ADAPTIVE_SAMPLING'
adaptive_sampling
max_iterations 5
samples_on_emulator 20
fitness_metric gradient
initial_samples = 1
model_pointer = "TRUE_MODEL"
seed 41
batch_selection
naive
misc_options
'batch_size=10'

model
id_model = 'TRUE_MODEL'
single
interface_pointer = 'INTERFACE'
variables_pointer = 'VARIABLES'
responses_pointer = 'RESPONSES'

variables
id_variables = 'VARIABLES'
continuous_design = 2
descriptors 'PARAM1' 'PARAM2'
initial_point 0.5 1.0
lower_bounds 0.0 0.0
upper_bounds 1.0 2.0

interface,
id_interface = 'INTERFACE'
batch
python
analysis_drivers
'evaluator'

responses
id_responses = 'RESPONSES'
descriptors 'OBJ1' 'OBJ2'
objective_functions = 2
no_gradients
no_hessians
36 changes: 36 additions & 0 deletions tests/adasampling/disabled_adasampling.py
@@ -0,0 +1,36 @@
import os
import pathlib as pl

import dakota.environment as dakenv

script_dir = pl.Path(__file__).parent


def evaluator(inputs):
# Get the continuous variables out of the input provided by dakota
params = inputs["cv"]
print(f"Evaluating {params}")

# Put the objective in the dakota 'fns' field of the output
outputs = {"fns": params}
return outputs


def batch_evaluator(batch_input):
return map(evaluator, batch_input)


def test_adasampling(tmp_path):
print("Starting dakota")
os.chdir(tmp_path)
dakota_conf_path = script_dir / "adasampling.in"
dakota_conf = dakota_conf_path.read_text()
study = dakenv.study(
callbacks={"evaluator": batch_evaluator}, input_string=dakota_conf
)

study.execute()


if __name__ == "__main__":
test_adasampling()
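The batch interface in adasampling.in hands the callback a list of evaluation dicts rather than a single one, which is why batch_evaluator maps evaluator over its input. A minimal sketch of the expected shapes, using made-up sample values that are not part of the commit: each entry carries the continuous variables under "cv", and each result returns the responses under "fns".

sample_batch = [{"cv": [0.5, 1.0]}, {"cv": [0.25, 1.5]}]
results = list(batch_evaluator(sample_batch))
assert results == [{"fns": [0.5, 1.0]}, {"fns": [0.25, 1.5]}]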
46 changes: 46 additions & 0 deletions tests/moga/moga.in
@@ -0,0 +1,46 @@
# Specify the output file for tabular data
environment
tabular_data
tabular_data_file
'dakota_tabular.dat'
top_method_pointer = 'MULTIOBJ_OPTIMIZATION'

# Define the optimization method
method
id_method = 'MULTIOBJ_OPTIMIZATION'
moga # Multi-Objective Genetic Algorithm
model_pointer = "TRUE_MODEL"
seed 1234 # Set random seed for reproducibility
max_function_evaluations 100 # Maximum number of function evaluations

# Define the model
model
id_model = 'TRUE_MODEL'
single
interface_pointer = 'INTERFACE'
variables_pointer = 'VARIABLES'
responses_pointer = 'RESPONSES'

# Define the variables
variables
id_variables = 'VARIABLES'
continuous_design = 3 # Number of continuous design variables
descriptors 'PARAM1' 'PARAM2' 'PARAM3'
initial_point 2.0 3.0 4.0
lower_bounds 0.0 0.0 0.0
upper_bounds 10.0 10.0 10.0

# Define the interface
interface
id_interface = 'INTERFACE'
python
analysis_drivers
'evaluator' # Python script to evaluate the objectives

# Define the responses
responses
id_responses = 'RESPONSES'
descriptors 'OBJ1' 'OBJ2'
objective_functions = 2 # Number of objective functions
no_gradients # Gradients will not be provided
no_hessians # Hessians will not be provided
43 changes: 43 additions & 0 deletions tests/moga/test_moga.py
@@ -0,0 +1,43 @@
import os
import pathlib as pl

import dakota.environment as dakenv

script_dir = pl.Path(__file__).parent

def evaluate(x, y, z):
# Objective 1: Minimize the sum of squares
obj1 = x**2 + y**2 + z**2

# Objective 2: Maximize the product
obj2 = -(x * y * z) # Negated because we conventionally minimize

return obj1, obj2


def evaluator(inputs):
# Get the continuous variables out of the input provided by dakota
params = inputs["cv"]
#print(f"Evaluating {params}")

# Put the objective in the dakota 'fns' field of the output
outputs = {"fns": evaluate(*params)}
return outputs


def test_moga(tmp_path):
print("Starting dakota")

os.chdir(tmp_path)
dakota_conf_path = script_dir / "moga.in"
dakota_conf = dakota_conf_path.read_text()
study = dakenv.study(
callbacks={"evaluator": evaluator},
input_string=dakota_conf,
)

study.execute()


if __name__ == "__main__":
test_moga()
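As a quick sanity check on the two objectives, evaluated at the initial_point declared in moga.in (2.0, 3.0, 4.0):

# obj1 = 2**2 + 3**2 + 4**2 = 29.0   (sum of squares, minimized directly)
# obj2 = -(2 * 3 * 4)       = -24.0  (negated product, so minimizing it maximizes the product)
assert evaluate(2.0, 3.0, 4.0) == (29.0, -24.0)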
44 changes: 44 additions & 0 deletions tests/simple/simple.in
@@ -0,0 +1,44 @@
environment
tabular_data
tabular_data_file
'dakota_tabular.dat'
top_method_pointer = 'SAMPLING'

method
id_method = 'SAMPLING'
sampling
sample_type lhs
samples 10
model_pointer = "TRUE_MODEL"
seed 1234

model
id_model = 'TRUE_MODEL'
single
interface_pointer = 'INTERFACE'
variables_pointer = 'VARIABLES'
responses_pointer = 'RESPONSES'

variables
id_variables = 'VARIABLES'
continuous_design = 2
descriptors 'PARAM1' 'PARAM2'
initial_point 0.5 1.0
lower_bounds 0.0 0.0
upper_bounds 1.0 2.0

interface,
id_interface = 'INTERFACE'
python
analysis_drivers
'evaluator'
failure_capture
recover
NaN NaN

responses
id_responses = 'RESPONSES'
descriptors 'OBJ1' 'OBJ2'
objective_functions = 2
no_gradients
no_hessians
37 changes: 37 additions & 0 deletions tests/simple/test_simple.py
@@ -0,0 +1,37 @@
import os
import pathlib as pl

import dakota.environment as dakenv

script_dir = pl.Path(__file__).parent


def evaluator(inputs):
# Get the continuous variables out of the input provided by dakota
params = inputs["cv"]
print(f"Evaluating {params}")

# Put the objective in the dakota 'fns' field of the output
outputs = {"fns": params, "failure": 1}

#return Exception()
return outputs


def test_simple(tmp_path):
os.chdir(tmp_path)

print("Starting dakota")

dakota_conf_path = script_dir / "simple.in"
dakota_conf = dakota_conf_path.read_text()
study = dakenv.study(
callbacks={"evaluator": evaluator},
input_string=dakota_conf,
)

study.execute()


if __name__ == "__main__":
test_simple()
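The evaluator above reports every evaluation as failed by returning "failure": 1 next to the responses; together with the failure_capture / recover NaN NaN block in simple.in, Dakota is expected to substitute NaN for both objectives rather than abort the study. A sketch of a conditional variant, with a hypothetical validity check that is not part of the commit:

import math

def guarded_evaluator(inputs):
    params = inputs["cv"]
    outputs = {"fns": params}
    # Raise the failure flag only when the evaluation actually went wrong,
    # letting Dakota's failure_capture settings handle recovery.
    if any(math.isnan(p) for p in params):
        outputs["failure"] = 1
    return outputs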
42 changes: 42 additions & 0 deletions tests/simple_batch/simple_batch.in
@@ -0,0 +1,42 @@
environment
tabular_data
tabular_data_file
'dakota_tabular.dat'
top_method_pointer = 'SAMPLING'

method
id_method = 'SAMPLING'
sampling
sample_type lhs
samples 10
model_pointer = "TRUE_MODEL"
seed 1234

model
id_model = 'TRUE_MODEL'
single
interface_pointer = 'INTERFACE'
variables_pointer = 'VARIABLES'
responses_pointer = 'RESPONSES'

variables
id_variables = 'VARIABLES'
continuous_design = 2
descriptors 'PARAM1' 'PARAM2'
initial_point 0.5 1.0
lower_bounds 0.0 0.0
upper_bounds 1.0 2.0

interface,
id_interface = 'INTERFACE'
batch
python
analysis_drivers
'evaluator'

responses
id_responses = 'RESPONSES'
descriptors 'OBJ1' 'OBJ2'
objective_functions = 2
no_gradients
no_hessians
