diff --git a/tested/internationalization/en.yaml b/tested/internationalization/en.yaml
index 04c37b3b..90cd3d8b 100644
--- a/tested/internationalization/en.yaml
+++ b/tested/internationalization/en.yaml
@@ -52,9 +52,7 @@ en:
       exitcode: "Exitcode %{exitcode}."
     core:
       unsupported:
-        language: >-
-          This exercise can't be solved in this programming language:
-          %{language}
+        language: "Unsupported programming language"
       compilation: "Compilation"
       invalid:
         source-code: "Invalid source code"
diff --git a/tested/internationalization/nl.yaml b/tested/internationalization/nl.yaml
index 29994397..f245a2dc 100644
--- a/tested/internationalization/nl.yaml
+++ b/tested/internationalization/nl.yaml
@@ -52,9 +52,7 @@ nl:
       exitcode: "Exitcode %{exitcode}."
     core:
      unsupported:
-        language: >-
-          Deze oefening kan niet opgelost worden in deze programmeertaal:
-          %{language}
+        language: "Niet-ondersteunde programmeertaal"
       compilation: "Compilatie"
       invalid:
         source-code: "Ongeldige broncode"
diff --git a/tested/judge/core.py b/tested/judge/core.py
index 10b642d7..35337088 100644
--- a/tested/judge/core.py
+++ b/tested/judge/core.py
@@ -59,27 +59,23 @@ def judge(bundle: Bundle):
         report_update(
             bundle.out,
             CloseJudgement(
-                accepted=False,
                 status=StatusMessage(
                     enum=Status.INTERNAL_ERROR,
-                    human=get_i18n_string(
-                        "judge.core.unsupported.language",
-                        language=bundle.config.programming_language,
-                    ),
+                    human=get_i18n_string("judge.core.unsupported.language"),
                 ),
             ),
         )
         _logger.info("Required features not supported.")
         return  # Not all required features are supported.
 
+    # Do the set-up for the judgement.
     collector = OutputManager(bundle.out)
     collector.add(StartJudgement())
-
     max_time = float(bundle.config.time_limit) * 0.9
     start = time.perf_counter()
 
     # Run the linter.
-    # TODO: move to the back? Or at least limit the time.
+    # TODO: do this in parallel
     run_linter(bundle, collector, max_time)
     if time.perf_counter() - start > max_time:
         terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED)
@@ -87,7 +83,16 @@ def judge(bundle: Bundle):
 
     _logger.debug("Planning execution")
     planned_units = plan_test_suite(bundle)
+    _judge_planned_units(bundle, collector, planned_units, start, max_time)
+
 
+def _judge_planned_units(
+    bundle: Bundle,
+    collector: OutputManager,
+    planned_units: list[PlannedExecutionUnit],
+    start: float,
+    max_time: float,
+):
     _logger.debug("Generating files")
     common_dir, dependencies, selector = _generate_files(bundle, planned_units)
 
@@ -202,6 +207,8 @@ def judge(bundle: Bundle):
             terminate(bundle, collector, result_status)
             return
 
+        # Depending on the result, we might want to do the next execution anyway.
+
     collector.add(CloseJudgement())
 
 
diff --git a/tests/test_functionality.py b/tests/test_functionality.py
index 0eb57b0d..b99c9d02 100644
--- a/tests/test_functionality.py
+++ b/tests/test_functionality.py
@@ -545,22 +545,6 @@ def test_programmed_evaluator_wrong(lang: str, tmp_path: Path, pytestconfig):
     assert len(updates.find_all("append-message")) == 1
 
 
-@pytest.mark.parametrize("language", ALL_LANGUAGES)
-def test_context_compilation(language: str, tmp_path: Path, pytestconfig, mocker):
-    config_ = {"options": {"mode": "context"}}
-    # Mock the compilation callback to ensure we call it for every context.
-    lang_class = LANGUAGES[language]
-    spy = mocker.spy(lang_class, "compilation")
-    conf = configuration(
-        pytestconfig, "echo", language, tmp_path, "two.tson", "correct", config_
-    )
-    result = execute_config(conf)
-    updates = assert_valid_output(result, pytestconfig)
-    assert len(updates.find_all("start-testcase")) == 2
-    assert updates.find_status_enum() == ["correct"] * 2
-    assert spy.call_count == 2
-
-
 @pytest.mark.parametrize("language", ALL_LANGUAGES)
 def test_batch_compilation(language: str, tmp_path: Path, pytestconfig, mocker):
     config_ = {"options": {"mode": "batch"}}