diff --git a/Dockerfile.report b/Dockerfile.report
new file mode 100644
index 0000000..71037b1
--- /dev/null
+++ b/Dockerfile.report
@@ -0,0 +1,25 @@
+FROM python:3.10
+
+# WORKDIR creates the directory if missing; use an absolute path (hadolint DL3000).
+WORKDIR /knowledge-middleware
+
+# Copy dependency manifests first so the poetry-install layer stays cached
+# until pyproject.toml / poetry.lock actually change.
+COPY pyproject.toml /knowledge-middleware/pyproject.toml
+COPY poetry.lock /knowledge-middleware/poetry.lock
+COPY README.md /knowledge-middleware/README.md
+
+RUN pip install --no-cache-dir poetry==1.5.1
+RUN poetry config virtualenvs.create true && \
+    poetry config virtualenvs.in-project true && \
+    poetry install --no-root --no-cache
+
+# Application code is copied after dependencies so code-only changes reuse the layers above.
+COPY api /knowledge-middleware/api
+COPY lib /knowledge-middleware/lib
+COPY worker /knowledge-middleware/worker
+COPY tests /knowledge-middleware/tests
+RUN poetry install --only-root --no-cache
+
+EXPOSE 8000
+CMD [ "poetry", "run", "poe", "report" ]
diff --git a/docker-bake.hcl b/docker-bake.hcl
index 7e7af33..d0da4f0 100644
--- a/docker-bake.hcl
+++ b/docker-bake.hcl
@@ -56,7 +56,7 @@ target "knowledge-middleware-worker-base" {
target "knowledge-middleware-report-base" {
context = "."
tags = tag("knowledge-middleware-report", "", "")
- dockerfile = "report/Dockerfile"
+ dockerfile = "Dockerfile.report"
}
target "knowledge-middleware-api" {
diff --git a/pyproject.toml b/pyproject.toml
index b191811..7e24e46 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,13 +29,9 @@ pyyaml = "^6.0.1"
pre-commit = "^3.3.3"
-[tool.poetry.scripts]
-status-test = "tests.report:test"
-gen-report = "tests.report:report"
-
[tool.poe.tasks]
-_test = "status-test"
-_report = "gen-report"
+_test.script = "tests.report:test"
+_report.script = "tests.report:gen_report"
report.sequence = ["_test", "_report"]
report.ignore_fail = true
diff --git a/report/Dockerfile b/report/Dockerfile
deleted file mode 100644
index c10798e..0000000
--- a/report/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-FROM scratch as tmp
-
-WORKDIR /
-
-COPY pyproject.toml pyproject.toml
-COPY poetry.lock poetry.lock
-COPY README.md README.md
-
-FROM python:3.10
-
-COPY --from=tmp / /
-
-WORKDIR /
-RUN pip install --no-cache-dir poetry==1.5.1
-RUN poetry config virtualenvs.create true && \
- poetry config virtualenvs.in-project true && \
- poetry install --no-root --no-cache
-
-COPY api api
-COPY lib lib
-COPY worker worker
-COPY tests tests
-RUN poetry install
-
-EXPOSE 8000
-ENV PYTHONPATH=/
-CMD [ "poetry", "run", "poe", "report" ]
diff --git a/tests/Home.py b/tests/Home.py
deleted file mode 100644
index a0e3993..0000000
--- a/tests/Home.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import json
-from functools import reduce
-from collections import defaultdict
-
-import streamlit as st
-import pandas as pd
-
-"""
-# Integration Healthchecks
-
-## TODOs
-- [x] TA1
-- [ ] TA3
-
-"""
-
-st.sidebar.markdown("""
-# Status of Integrations
-
-This app contains information about integration
-with various TAs.
-""")
-
diff --git a/tests/create_table.py b/tests/create_table.py
deleted file mode 100644
index c220684..0000000
--- a/tests/create_table.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import json
-
-with open("tests/output/report.json", "r") as f:
- data_dict = json.load(f)
-
-tests = set()
-operations = set()
-
-for scenario, values in data_dict.items():
- for operation, raw_tests in values['operations'].items():
- operations.add(operation)
- for test in raw_tests:
- tests.add(test)
-
-html = ""
-
-for test in sorted(tests):
- table = f'
{test}
\n'
- table += '\n'
-
- table += '\nScenarios | \n'
- for operation in sorted(operations):
- table += f'{operation} | \n'
- table += '
\n'
-
- for scenario in sorted(data_dict.keys()):
- table += f'\n{scenario} | \n'
- for operation in sorted(operations):
- operation_data = data_dict[scenario]['operations'].get(operation, {}).get(test, None)
- if operation_data is not None:
- if isinstance(operation_data, bool):
- table += f"{'✅' if operation_data else '❌'} | \n"
- else:
- table += f"{operation_data} | \n"
- else:
- table += "⚠️ | \n" # Indicating not applicable/missing
- table += '
\n'
-
- table += '
\n'
-
- html += table
-
-with open("tests/output/report.html", "w") as file:
- file.write(html)
diff --git a/tests/pages/1_TA1.py b/tests/pages/1_TA1.py
deleted file mode 100644
index d6c8c3e..0000000
--- a/tests/pages/1_TA1.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import json
-import datetime
-import os
-import re
-from functools import reduce
-from collections import defaultdict
-
-import streamlit as st
-import pandas as pd
-
-
-def custom_title(s):
- # List of words you want to fully capitalize
- FULL_CAPS = ['pdf', 'amr']
-
- words = s.replace('_', ' ').split()
- capitalized_words = [word.upper() if word in FULL_CAPS else word.title() for word in words]
- return ' '.join(capitalized_words)
-
-# Get a list of all report files with timestamp
-report_dir = "tests/output/"
-report_files = [f for f in os.listdir(report_dir) if re.match(r'report_\d{8}_\d{6}\.json', f)]
-report_files.sort(reverse=True) # Sort the files so the most recent is on top
-
-def format_timestamp_from_filename(filename):
- # Extract timestamp from filename
- match = re.search(r'report_(\d{8})_(\d{6})\.json', filename)
- if match:
- date_part, time_part = match.groups()
- # Convert to datetime object
- dt = datetime.datetime.strptime(f"{date_part}{time_part}", '%Y%m%d%H%M%S')
- # Return formatted string
- return dt.strftime('%Y-%m-%d %H:%M:%S')
- return None
-
-# Create a mapping of formatted timestamp to filename
-timestamp_to_filename = {format_timestamp_from_filename(f): f for f in report_files}
-
-# Let the user select a report based on formatted timestamps
-st.title("TA1 Integration Dashboard")
-selected_timestamp = st.selectbox("Select a report", list(timestamp_to_filename.keys()))
-
-# Map back to the original file name
-selected_report = timestamp_to_filename[selected_timestamp]
-
-# Open the selected report
-with open(os.path.join(report_dir, selected_report)) as file:
- report = json.load(file)
-
-test_results = defaultdict(lambda: defaultdict())
-
-for scenario, content in report.items():
- for operation, tests in content["operations"].items():
- for name, result in tests.items():
- test_results[name][(content["name"], operation)] = result
-
-scenarios = [report[scenario]["name"] for scenario in report.keys()]
-operations = list(reduce(lambda left, right: left.union(right), [set(content["operations"].keys()) for content in report.values()], set()))
-tests = sorted([i for i in test_results.keys() if i != "Logs"], reverse=True)
-tests.append("Logs")
-
-
-dataframes = {name: pd.DataFrame(index=scenarios, columns=operations) for name in tests}
-
-st.sidebar.markdown("""
-# TA1
-
-TA1 integration status and quality metrics.
-
-The current metrics are:
-- Status of `knowledge-middleware` integration
-- F-score for conversion of code/equations to AMR
-- Execution time
-- Application logs
-""")
-st.write("### Scenario Overview")
-scenarios_overview = ""
-for kk, vv in sorted(report.items(), key=lambda item: item[1]['name']):
- scenarios_overview += f"- **{vv['name']}**: {vv['description']}\n"
-st.write(scenarios_overview)
-
-for test in tests:
- df = dataframes[test]
- results = test_results[test]
- for (scenario_name, operation), result in results.items():
- df.at[scenario_name, operation] = result
- st.write(f"### {test}")
- df.replace({False: "❌", True: "✅", None: ""}, inplace=True)
- df.columns = [custom_title(col) for col in df.columns]
- df = df.sort_index()
- df
\ No newline at end of file
diff --git a/tests/report.py b/tests/report.py
index 187e2e4..0937c29 100644
--- a/tests/report.py
+++ b/tests/report.py
@@ -15,14 +15,14 @@ def test(output_file="tests/output/tests.json"):
pytest.main(["--json-report", f"--json-report-file={output_file}"])
-def report():
+def gen_report():
# TODO: Make this into a predefined struct
- report = defaultdict(lambda: {"operations": defaultdict(dict)})
+ scenarios = defaultdict(lambda: {"operations": defaultdict(dict)})
if os.path.exists("tests/output/qual.csv"):
with open("tests/output/qual.csv", "r", newline="") as file:
qual = csv.reader(file)
for scenario, operation, test, result in qual:
- report[scenario]["operations"][operation][test] = result
+ scenarios[scenario]["operations"][operation][test] = result
with open("tests/output/tests.json", "r") as file:
raw_tests = json.load(file)["tests"]
@@ -34,24 +34,48 @@ def add_case(testobj):
operation, scenario = match_result[1], match_result[2]
passed = testobj["outcome"] == "passed"
duration = round(testobj["call"]["duration"],2)
- report[scenario]["operations"][operation]["Integration Status"] = passed
- report[scenario]["operations"][operation]["Execution Time"] = duration
+ scenarios[scenario]["operations"][operation]["Integration Status"] = passed
+ scenarios[scenario]["operations"][operation]["Execution Time"] = duration
try:
logs = testobj["call"]["stderr"]
- report[scenario]["operations"][operation]["Logs"] = logs
+ scenarios[scenario]["operations"][operation]["Logs"] = logs
except Exception as e:
print(f"Unable to obtain logs for {full_name}: {e}")
for testobj in raw_tests: add_case(testobj)
- for scenario in report:
+ for scenario in scenarios:
with open(f"tests/scenarios/{scenario}/config.yaml") as file:
spec = yaml.load(file, yaml.CLoader)
- report[scenario]["name"] = spec["name"]
- report[scenario]["description"] = spec["description"]
+ scenarios[scenario]["name"] = spec["name"]
+ scenarios[scenario]["description"] = spec["description"]
+
+ report = {
+ "scenarios": scenarios,
+ # TODO: Grab version
+ # NOTE: This is broken up currently because we expect different version calls
+ "services": {
+ "TA1_UNIFIED_URL":{
+ "source": settings.TA1_UNIFIED_URL,
+ "version": "UNAVAILABLE"
+ },
+ "SKEMA_RS_URL":{
+ "source": settings.SKEMA_RS_URL,
+ "version": "UNAVAILABLE"
+ },
+ "MIT_TR_URL":{
+ "source": settings.MIT_TR_URL,
+ "version": "UNAVAILABLE"
+ },
+ "COSMOS_URL":{
+ "source": settings.COSMOS_URL,
+ "version": "UNAVAILABLE"
+ },
+ }
+ }
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
filename = f"report_{timestamp}.json"
- fullpath = os.path.join("tests/output", "filename")
+ fullpath = os.path.join("tests/output", filename)
with open(fullpath, "w") as file:
json.dump(report, file, indent=2)
@@ -60,4 +84,4 @@ def add_case(testobj):
s3.upload_file(fullpath, settings.BUCKET, full_handle)
if __name__ == "__main__":
- report()
\ No newline at end of file
+    gen_report()