Add tests after building the wheel
wvangeit committed Sep 4, 2024
1 parent 8374725 commit eb2b15b
Showing 15 changed files with 421 additions and 1 deletion.
6 changes: 5 additions & 1 deletion .github/workflows/build-wheels.yml
@@ -54,12 +54,16 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: 3.11
+          python-version: ${{ matrix.python }}
       - name: Get dakota src
         run: make get-dakota-src
       - uses: pypa/cibuildwheel@v2.16
         env:
           CIBW_BUILD: ${{ matrix.python }}*${{ matrix.arch }}
+      - name: Run tests
+        run: |
+          pip install ./wheelhouse/*.whl
+          make test
       - name: Upload wheels
         uses: actions/upload-artifact@v4
         with:
3 changes: 3 additions & 0 deletions Makefile
@@ -4,6 +4,9 @@ all:
 wheel: cache-clean clean
 	CIBW_BUILD=cp311*x86_64 cibuildwheel --platform linux
 
+test:
+	pytest
+
 install:
 	pip install -v .
 
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -77,3 +77,6 @@ DAKOTA_PYTHON_WRAPPER = "ON"
 DAKOTA_PYTHON_DIRECT_INTERFACE = "ON"
 BUILD_SHARED_LIBS = "OFF"
 CMAKE_POSITION_INDEPENDENT_CODE = "ON"
+
+[tool.pytest.ini_options]
+addopts = "--ignore=dakota/"
48 changes: 48 additions & 0 deletions tests/adasampling/adasampling.in
@@ -0,0 +1,48 @@
environment
  tabular_data
    tabular_data_file
      'dakota_tabular.dat'
  top_method_pointer = 'ADAPTIVE_SAMPLING'

method
  id_method = 'ADAPTIVE_SAMPLING'
  adaptive_sampling
    max_iterations 5
    samples_on_emulator 20
    fitness_metric gradient
    initial_samples = 1
    model_pointer = "TRUE_MODEL"
    seed 41
    batch_selection
      naive
    misc_options
      'batch_size=10'

model
  id_model = 'TRUE_MODEL'
  single
    interface_pointer = 'INTERFACE'
    variables_pointer = 'VARIABLES'
    responses_pointer = 'RESPONSES'

variables
  id_variables = 'VARIABLES'
  continuous_design = 2
    descriptors 'PARAM1' 'PARAM2'
    initial_point 0.5 1.0
    lower_bounds 0.0 0.0
    upper_bounds 1.0 2.0

interface,
  id_interface = 'INTERFACE'
  batch
  python
    analysis_drivers
      'evaluator'

responses
  id_responses = 'RESPONSES'
  descriptors 'OBJ1' 'OBJ2'
  objective_functions = 2
  no_gradients
  no_hessians
36 changes: 36 additions & 0 deletions tests/adasampling/test_adasampling.py
@@ -0,0 +1,36 @@
import os
import pathlib as pl

import dakota.environment as dakenv

script_dir = pl.Path(__file__).parent


def evaluator(inputs):
    # Get the continuous variables out of the input provided by dakota
    params = inputs["cv"]
    print(f"Evaluating {params}")

    # Put the objective in the dakota 'fns' field of the output
    outputs = {"fns": params}
    return outputs


def batch_evaluator(batch_input):
    return map(evaluator, batch_input)


def test_adasampling(tmp_path):
    print("Starting dakota")
    os.chdir(tmp_path)
    dakota_conf_path = script_dir / "adasampling.in"
    dakota_conf = dakota_conf_path.read_text()
    study = dakenv.study(
        callbacks={"evaluator": batch_evaluator}, input_string=dakota_conf
    )

    study.execute()


if __name__ == "__main__":
    # Allow running the test as a plain script: pytest normally supplies
    # tmp_path, so create a throwaway working directory here instead.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        test_adasampling(pl.Path(tmp_dir))
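
The callback contract these tests rely on is worth spelling out (it is inferred from the tests themselves rather than from Dakota's documentation, so treat the exact field names as an assumption): with the batch interface, the registered callback receives a sequence of per-evaluation dicts whose "cv" entry holds the continuous design variables, and must return one {"fns": ...} dict per evaluation. A minimal, self-contained sketch that exercises the same pattern without running Dakota:

    def evaluator(inputs):
        # Echo the continuous variables back as objective values,
        # mirroring the evaluator in the test above.
        return {"fns": inputs["cv"]}

    def batch_evaluator(batch_input):
        return map(evaluator, batch_input)

    # Hypothetical stand-in for the batch input Dakota would construct;
    # only the "cv" field is assumed, since that is all the tests read.
    fake_batch = [{"cv": [0.25, 1.5]}, {"cv": [0.75, 0.5]}]

    results = list(batch_evaluator(fake_batch))  # map() is lazy, so materialize it
    assert results == [{"fns": [0.25, 1.5]}, {"fns": [0.75, 0.5]}]
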
46 changes: 46 additions & 0 deletions tests/moga/moga.in
@@ -0,0 +1,46 @@
# Specify the output file for tabular data
environment
  tabular_data
    tabular_data_file
      'dakota_tabular.dat'
  top_method_pointer = 'MULTIOBJ_OPTIMIZATION'

# Define the optimization method
method
  id_method = 'MULTIOBJ_OPTIMIZATION'
  moga                              # Multi-Objective Genetic Algorithm
    model_pointer = "TRUE_MODEL"
    seed 1234                       # Set random seed for reproducibility
    max_function_evaluations 100    # Maximum number of function evaluations

# Define the model
model
  id_model = 'TRUE_MODEL'
  single
    interface_pointer = 'INTERFACE'
    variables_pointer = 'VARIABLES'
    responses_pointer = 'RESPONSES'

# Define the variables
variables
  id_variables = 'VARIABLES'
  continuous_design = 3             # Number of continuous design variables
    descriptors 'PARAM1' 'PARAM2' 'PARAM3'
    initial_point 2.0 3.0 4.0
    lower_bounds 0.0 0.0 0.0
    upper_bounds 10.0 10.0 10.0

# Define the interface
interface
  id_interface = 'INTERFACE'
  python
    analysis_drivers
      'evaluator'                   # Python script to evaluate the objectives

# Define the responses
responses
  id_responses = 'RESPONSES'
  descriptors 'OBJ1' 'OBJ2'
  objective_functions = 2           # Number of objective functions
  no_gradients                      # Gradients will not be provided
  no_hessians                       # Hessians will not be provided
43 changes: 43 additions & 0 deletions tests/moga/test_moga.py
@@ -0,0 +1,43 @@
import os
import pathlib as pl

import dakota.environment as dakenv

script_dir = pl.Path(__file__).parent


def evaluate(x, y, z):
    # Objective 1: Minimize the sum of squares
    obj1 = x**2 + y**2 + z**2

    # Objective 2: Maximize the product
    obj2 = -(x * y * z)  # Negated because we conventionally minimize

    return obj1, obj2


def evaluator(inputs):
    # Get the continuous variables out of the input provided by dakota
    params = inputs["cv"]
    # print(f"Evaluating {params}")

    # Put the objective in the dakota 'fns' field of the output
    outputs = {"fns": evaluate(*params)}
    return outputs


def test_moga(tmp_path):
    print("Starting dakota")

    os.chdir(tmp_path)
    dakota_conf_path = script_dir / "moga.in"
    dakota_conf = dakota_conf_path.read_text()
    study = dakenv.study(
        callbacks={"evaluator": evaluator},
        input_string=dakota_conf,
    )

    study.execute()


if __name__ == "__main__":
    # Allow running the test as a plain script: pytest normally supplies
    # tmp_path, so create a throwaway working directory here instead.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        test_moga(pl.Path(tmp_dir))
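
For concreteness, the negation in evaluate() is how a maximization objective is fed to MOGA, which always minimizes: minimizing -(x * y * z) maximizes the product. At the initial point declared in moga.in the two objectives work out as follows (a small self-contained check, not part of the committed test):

    def evaluate(x, y, z):
        # Same arithmetic as evaluate() in the test above.
        return x**2 + y**2 + z**2, -(x * y * z)

    # Initial point from moga.in: obj1 = 4 + 9 + 16 = 29, obj2 = -(2 * 3 * 4) = -24
    assert evaluate(2.0, 3.0, 4.0) == (29.0, -24.0)
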
44 changes: 44 additions & 0 deletions tests/simple/simple.in
@@ -0,0 +1,44 @@
environment
  tabular_data
    tabular_data_file
      'dakota_tabular.dat'
  top_method_pointer = 'SAMPLING'

method
  id_method = 'SAMPLING'
  sampling
    sample_type lhs
    samples 10
    model_pointer = "TRUE_MODEL"
    seed 1234

model
  id_model = 'TRUE_MODEL'
  single
    interface_pointer = 'INTERFACE'
    variables_pointer = 'VARIABLES'
    responses_pointer = 'RESPONSES'

variables
  id_variables = 'VARIABLES'
  continuous_design = 2
    descriptors 'PARAM1' 'PARAM2'
    initial_point 0.5 1.0
    lower_bounds 0.0 0.0
    upper_bounds 1.0 2.0

interface,
  id_interface = 'INTERFACE'
  python
    analysis_drivers
      'evaluator'
  failure_capture
    recover
      NaN NaN

responses
  id_responses = 'RESPONSES'
  descriptors 'OBJ1' 'OBJ2'
  objective_functions = 2
  no_gradients
  no_hessians
37 changes: 37 additions & 0 deletions tests/simple/test_simple.py
@@ -0,0 +1,37 @@
import os
import pathlib as pl

import dakota.environment as dakenv

script_dir = pl.Path(__file__).parent


def evaluator(inputs):
    # Get the continuous variables out of the input provided by dakota
    params = inputs["cv"]
    print(f"Evaluating {params}")

    # Put the objective in the dakota 'fns' field of the output and flag the
    # evaluation as failed, exercising 'failure_capture recover' in simple.in
    outputs = {"fns": params, "failure": 1}

    # return Exception()
    return outputs


def test_simple(tmp_path):
    os.chdir(tmp_path)

    print("Starting dakota")

    dakota_conf_path = script_dir / "simple.in"
    dakota_conf = dakota_conf_path.read_text()
    study = dakenv.study(
        callbacks={"evaluator": evaluator},
        input_string=dakota_conf,
    )

    study.execute()


if __name__ == "__main__":
    # Allow running the test as a plain script: pytest normally supplies
    # tmp_path, so create a throwaway working directory here instead.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        test_simple(pl.Path(tmp_dir))
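
The point of this test is the failure path: the evaluator marks every evaluation as failed via the "failure" field, and 'failure_capture recover NaN NaN' in simple.in tells Dakota to substitute the recovery values (NaN for both objectives) instead of aborting. In a real driver the flag would normally be set conditionally; a sketch of that pattern, with the meaning of the "failure" field inferred from this test rather than from Dakota's documentation, and a hypothetical model that only accepts positive inputs:

    import math


    def evaluator(inputs):
        params = inputs["cv"]
        try:
            # Hypothetical model that cannot handle non-positive inputs.
            return {"fns": [math.log(p) for p in params]}
        except ValueError:
            # Report the evaluation as failed, so that the recovery values
            # from 'failure_capture recover NaN NaN' can take over.
            return {"fns": [float("nan")] * len(params), "failure": 1}
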
42 changes: 42 additions & 0 deletions tests/simple_batch/simple_batch.in
@@ -0,0 +1,42 @@
environment
  tabular_data
    tabular_data_file
      'dakota_tabular.dat'
  top_method_pointer = 'SAMPLING'

method
  id_method = 'SAMPLING'
  sampling
    sample_type lhs
    samples 10
    model_pointer = "TRUE_MODEL"
    seed 1234

model
  id_model = 'TRUE_MODEL'
  single
    interface_pointer = 'INTERFACE'
    variables_pointer = 'VARIABLES'
    responses_pointer = 'RESPONSES'

variables
  id_variables = 'VARIABLES'
  continuous_design = 2
    descriptors 'PARAM1' 'PARAM2'
    initial_point 0.5 1.0
    lower_bounds 0.0 0.0
    upper_bounds 1.0 2.0

interface,
  id_interface = 'INTERFACE'
  batch
  python
    analysis_drivers
      'evaluator'

responses
  id_responses = 'RESPONSES'
  descriptors 'OBJ1' 'OBJ2'
  objective_functions = 2
  no_gradients
  no_hessians
38 changes: 38 additions & 0 deletions tests/simple_batch/test_simple_batch.py
@@ -0,0 +1,38 @@
import os
import pathlib as pl

import dakota.environment as dakenv

script_dir = pl.Path(__file__).parent


def evaluator(inputs):
    # Get the continuous variables out of the input provided by dakota
    params = inputs["cv"]
    print(f"Evaluating {params}")

    # Put the objective in the dakota 'fns' field of the output
    outputs = {"fns": params}
    return outputs


def batch_evaluator(batch_input):
    return map(evaluator, batch_input)


def test_simple_batch(tmp_path):
    print("Starting dakota")

    os.chdir(tmp_path)
    dakota_conf_path = script_dir / "simple_batch.in"
    dakota_conf = dakota_conf_path.read_text()
    study = dakenv.study(
        callbacks={"evaluator": batch_evaluator},
        input_string=dakota_conf,
    )

    study.execute()


if __name__ == "__main__":
    # Allow running the test as a plain script: pytest normally supplies
    # tmp_path, so create a throwaway working directory here instead.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        test_simple_batch(pl.Path(tmp_dir))
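
One detail worth noting: batch_evaluator returns the lazy iterator produced by map(), leaving it to the Dakota wrapper to consume. An eager, list-based variant behaves the same from the caller's point of view and surfaces exceptions immediately, which can make debugging easier; a drop-in alternative for the definition above (a stylistic sketch, not part of the commit):

    def batch_evaluator(batch_input):
        # Same results as the map()-based callback above, evaluated eagerly.
        return [evaluator(single_input) for single_input in batch_input]
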
Binary file added tests/simple_restart/dakota.rst
Binary file not shown.