diff --git a/tests/exercises/division/evaluation/Evaluator.cs b/tests/exercises/division/evaluation/Evaluator.cs
index 1e6508b1..932ad9d3 100644
--- a/tests/exercises/division/evaluation/Evaluator.cs
+++ b/tests/exercises/division/evaluation/Evaluator.cs
@@ -10,4 +10,8 @@ public static EvaluationResult Evaluate(object? actual) {
             return new EvaluationResult(false, "System.DivideByZeroException", actual == null ? "" : actual.ToString(), messages);
         }
     }
+
+    public static EvaluationResult Runtime(object? actual) {
+        throw new ArgumentOutOfRangeException("hello");
+    }
 }
diff --git a/tests/exercises/division/evaluation/Evaluator.hs b/tests/exercises/division/evaluation/Evaluator.hs
index 613eeb0f..95340379 100644
--- a/tests/exercises/division/evaluation/Evaluator.hs
+++ b/tests/exercises/division/evaluation/Evaluator.hs
@@ -20,6 +20,14 @@ evaluate (Just x) =
         }
 
 
+runtime :: Maybe (SomeException) -> EvaluationResult
+runtime _ = evaluationResult {
+    readableExpected = Just $ show DivideByZero,
+    readableActual = Just $ show (100 `div` 0),
+    messages = [message "Expected DivideByZero, got nothing."]
+  }
+
+
 handleA :: ArithException -> EvaluationResult
 handleA DivideByZero = evaluationResult {
     result = True,
@@ -30,4 +38,4 @@ handleA other = evaluationResult {
     readableExpected = Just $ show DivideByZero,
     readableActual = Just $ show other,
     messages = [message "Expected DivideByZero, got something else."]
-  }
\ No newline at end of file
+  }
diff --git a/tests/exercises/division/evaluation/Evaluator.java b/tests/exercises/division/evaluation/Evaluator.java
index d0c87408..8a12ad75 100644
--- a/tests/exercises/division/evaluation/Evaluator.java
+++ b/tests/exercises/division/evaluation/Evaluator.java
@@ -14,4 +14,8 @@ public static EvaluationResult evaluate(Object actual) {
         }
     }
 
-}
\ No newline at end of file
+    public static EvaluationResult runtime(Object actual) {
+        throw new RuntimeException("Something went wrong!");
+    }
+
+}
diff --git a/tests/exercises/division/evaluation/Evaluator.kt b/tests/exercises/division/evaluation/Evaluator.kt
index 62ab6c7c..10a2c25b 100644
--- a/tests/exercises/division/evaluation/Evaluator.kt
+++ b/tests/exercises/division/evaluation/Evaluator.kt
@@ -14,5 +14,10 @@ class Evaluator {
                     .build()
             }
         }
+
+        @JvmStatic
+        fun runtime(actual: Any?): EvaluationResult {
+            throw Exception("Hi There!")
+        }
     }
-}
\ No newline at end of file
+}
diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.cs b/tests/exercises/division/evaluation/EvaluatorSyntaxError.cs
new file mode 100644
index 00000000..3ecbd3a8
--- /dev/null
+++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.cs
@@ -0,0 +1,9 @@
+using System;
+using Tested;
+a
+public class Evaluator { zeffa ff v
+    public static E vxwvsages = new List() { new Message("Expected DivideByZeroException, got something else.") };
+            return new EvaluationResult(false, "System.DivideByZeroException", actual == null ? "" : actual.ToString(), messages);
+        }
+    }
+}vv qega
diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.hs b/tests/exercises/division/evaluation/EvaluatorSyntaxError.hs
new file mode 100644
index 00000000..c6fa7935
--- /dev/null
+++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.hs
@@ -0,0 +1,37 @@
+{-# LANGUAGE ScopedTypeVariables #-}
+module Evaluator where
+
+import EvaluationUtils
+import Control.Exception
+
+evaluate :: Maybe (SomeException) -> EvaluationResult
+evaluate Nothing = evaluationResult {
+    readableExpected = Just $ show DivideByZero,
+    readableActual = Just "",
+    messages = [message "Expected DivideByZero, got nothing."]
+  }
+evaluate (Just x) =
+    case fromException x off aegaeglho hapyat²uùµajµjoµjµ µg jùµj ùtjùpµtjùpjµj(&µj µjµajtpµj
+
+egkzmknzk oih
+gcd
+
+
+
+
+
+zgg[message "Expected DivideByZero, got nothing."]
+  }
+
+
+handleA :: ArithException -> EvaluationResult
+handleA DivideByZero = evaluationResult {
+    result = True,
+    readableExpected = Just $ show DivideByZero,
+    readableActual = Just $ show DivideByZero
+  }
+handleA other = evaluationResult {
+    readableExpected = Just $ show DivideByZero,
+    readableActual = Just $ show other,
+    messages = [message "Expected DivideByZero, got something else."]
+  }
diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.java b/tests/exercises/division/evaluation/EvaluatorSyntaxError.java
new file mode 100644
index 00000000..f41a093b
--- /dev/null
+++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.java
@@ -0,0 +1,15 @@
+public class Evaluator { egqdg sd
+    public static EvaluationResult evaluate(Object actual) {
+        if (actual instanceof ArithmeticException) {
+            return EvaluationResuvd lt.builder(true)
+                    .withReadableExpected(actual.toString())
+                    .withReadableActual(actual.toString())
+                    .build();
+        } else {
+            return EvaluationResusdlt.builder(false)
+                    .withReadableExpected("ArithmeticException")
+                    .withReadableActual(actual == null ? "" : actual.toString())
+                    .withMessage(nbsd
+        }
+
+} sbsdgssdé§u u
diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.kt b/tests/exercises/division/evaluation/EvaluatorSyntaxError.kt
new file mode 100644
index 00000000..b065b225
--- /dev/null
+++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.kt
@@ -0,0 +1,16 @@
+class Evaluator { ae
+    companion object {t "t"&t
+        @JvmStatic zfz"r'" '" '
+        fun evaluate(actual: Any?): EvaluationResult {
+            return if (actual is ArithmeticException) {
+                EvaluationResult.Builder(result = true,
+                        readableExpected = actual.toString(),
+                        aeg readableActual = actual.toString()).build()
+            } else {
+                EvalugtionResult.Builder(result = false,
+                        readableExpected = "ArithmeticException",
+                        readableActual = actual?.toString() ?: "")
+                        .withMessage(EvaluationResult.Message("Expected ArithmeticException, got something else."))
+                        .build()
+            qg
+}qd qsdvdvqd
diff --git a/tests/exercises/division/evaluation/evaluator-syntax-error.py b/tests/exercises/division/evaluation/evaluator-syntax-error.py
new file mode 100644
index 00000000..4accc118
--- /dev/null
+++ b/tests/exercises/division/evaluation/evaluator-syntax-error.py
@@ -0,0 +1,18 @@
+ jdhbkd mbzough import traceback
+
+from evaluation_utils import EvaluationResult, Message
+
+
+def evaluate(value):
+    if isinstance(value, ZeroDivisionError):
+        # If a zero division error, show the stacktrace.
+        formatted = "".join(traceback.format_exception(type(value), value, value.__traceback__))
+        return EvaluationResult(True, formatted, formatted)
+    elif isinstance(value, Exception):
+        # If another error, show the stacktrace as well.
+        formatted = "".join(traceback.format_exception(type(value), value, value.__traceback__))
+        return EvaluationResult(False, "ZeroDivisionError", formatted, [Message(f"Verwachtte een ZeroDivisionError, maar kreeg een {type(value).__name__}.")])
+    else:
+        # Else show the str of the value.
+        actual = str(value) if value else ""
+        return EvaluationResult(False, "ZeroDivisionError", actual, [Message("Verwachtte een ZeroDivisionError.")])
diff --git a/tests/exercises/division/evaluation/evaluator.py b/tests/exercises/division/evaluation/evaluator.py
index 51bfddc7..58832e1e 100644
--- a/tests/exercises/division/evaluation/evaluator.py
+++ b/tests/exercises/division/evaluation/evaluator.py
@@ -16,3 +16,7 @@ def evaluate(value):
         # Else show the str of the value.
         actual = str(value) if value else ""
         return EvaluationResult(False, "ZeroDivisionError", actual, [Message("Verwachtte een ZeroDivisionError.")])
+
+
+def runtime(_value):
+    raise ValueError("Hallo")
diff --git a/tests/exercises/division/evaluation/plan-runtime-exception.json b/tests/exercises/division/evaluation/plan-runtime-exception.json
new file mode 100644
index 00000000..506b5dee
--- /dev/null
+++ b/tests/exercises/division/evaluation/plan-runtime-exception.json
@@ -0,0 +1,50 @@
+{
+  "tabs": [
+    {
+      "name": "Feedback",
+      "runs": [
+        {
+          "run": {
+            "description": "Uitvoeren code",
+            "input": {
+              "main_call": true
+            },
+            "output": {
+              "exception": {
+                "evaluator": {
+                  "type": "specific",
+                  "evaluators": {
+                    "python": {
+                      "file": "evaluator.py",
+                      "name": "runtime"
+                    },
+                    "java": {
+                      "file": "Evaluator.java",
+                      "name": "runtime"
+                    },
+                    "kotlin": {
+                      "file": "Evaluator.kt",
+                      "name": "runtime"
+                    },
+                    "haskell": {
+                      "file": "Evaluator.hs",
+                      "name": "runtime"
+                    },
+                    "runhaskell": {
+                      "file": "Evaluator.hs",
+                      "name": "runtime"
+                    },
+                    "csharp": {
+                      "file": "Evaluator.cs",
+                      "name": "runtime"
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      ]
+    }
+  ]
+}
diff --git a/tests/exercises/division/evaluation/plan-syntax-error.json b/tests/exercises/division/evaluation/plan-syntax-error.json
new file mode 100644
index 00000000..d62aa49e
--- /dev/null
+++ b/tests/exercises/division/evaluation/plan-syntax-error.json
@@ -0,0 +1,44 @@
+{
+  "tabs": [
+    {
+      "name": "Feedback",
+      "runs": [
+        {
+          "run": {
+            "description": "Uitvoeren code",
+            "input": {
+              "main_call": true
+            },
+            "output": {
+              "exception": {
+                "evaluator": {
+                  "type": "specific",
+                  "evaluators": {
+                    "python": {
+                      "file": "evaluator-syntax-error.py"
+                    },
+                    "java": {
+                      "file": "EvaluatorSyntaxError.java"
+                    },
+                    "kotlin": {
+                      "file": "EvaluatorSyntaxError.kt"
+                    },
+                    "haskell": {
+                      "file": "EvaluatorSyntaxError.hs"
+                    },
+                    "runhaskell": {
+                      "file": "EvaluatorSyntaxError.hs"
+                    },
+                    "csharp": {
+                      "file": "EvaluatorSyntaxError.cs"
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      ]
+    }
+  ]
+}
diff --git a/tests/language_markers.py b/tests/language_markers.py
index 747b6584..87d10602 100644
--- a/tests/language_markers.py
+++ b/tests/language_markers.py
@@ -13,3 +13,5 @@
     pytest.param("runhaskell", marks=pytest.mark.haskell),
 ]
 ALL_LANGUAGES = ALL_SPECIFIC_LANGUAGES + ["bash"]
+
+EXCEPTION_LANGUAGES = ["python", "java", "kotlin", "csharp", "haskell"]
diff --git a/tests/manual_utils.py b/tests/manual_utils.py
index afce524d..536fb1b0 100644
--- a/tests/manual_utils.py
+++ b/tests/manual_utils.py
@@ -3,6 +3,7 @@
 from io import StringIO
 from pathlib import Path
 
+import pytest
 from jsonschema import validate
 
 from tested.cli import CommandDict, split_output
@@ -28,7 +29,7 @@ def assert_valid_output(output: str, config) -> CommandDict:
 
 
 def configuration(
-    config,
+    config: pytest.Config,
     exercise: str,
     language: str,
     work_dir: Path,
@@ -36,7 +37,7 @@ def configuration(
     solution: str = "solution",
     options=None,
 ) -> DodonaConfig:
-    exercise_dir = Path(config.rootdir) / "tests" / "exercises"
+    exercise_dir = config.rootpath / "tests" / "exercises"
     ep = exercise_dir / exercise
     return exercise_configuration(
         config, ep, language, work_dir, suite, solution, options
@@ -44,7 +45,7 @@ def configuration(
 
 
 def exercise_configuration(
-    config,
+    config: pytest.Config,
    exercise_directory: Path,
     language: str,
     work_dir: Path,
@@ -64,7 +65,7 @@ def exercise_configuration(
         "natural_language": "nl",
         "resources": exercise_directory / "evaluation",
         "source": exercise_directory / "solution" / f"{solution}.{ext}",
-        "judge": Path(f"{config.rootdir}"),
+        "judge": config.rootpath,
         "workdir": work_dir,
         "test_suite": suite,
         "options": {"linter": False},
diff --git a/tests/test_functionality.py b/tests/test_functionality.py
index 9434ad98..35277666 100644
--- a/tests/test_functionality.py
+++ b/tests/test_functionality.py
@@ -12,7 +12,6 @@
 from pathlib import Path
 
 import pytest
-from language_markers import ALL_LANGUAGES, ALL_SPECIFIC_LANGUAGES
 
 from tested.configs import create_bundle
 from tested.datatypes import BasicBooleanTypes, BasicNumericTypes, BasicStringTypes
@@ -28,6 +27,11 @@
     StringType,
 )
 from tested.testsuite import Context, MainInput, Suite, Tab, Testcase, TextData
+from tests.language_markers import (
+    ALL_LANGUAGES,
+    ALL_SPECIFIC_LANGUAGES,
+    EXCEPTION_LANGUAGES,
+)
 from tests.manual_utils import assert_valid_output, configuration, execute_config
 
 quotes = {
@@ -224,55 +228,21 @@ def test_io_function_exercise_haskell_io(language: str, tmp_path: Path, pytestco
     assert updates.find_status_enum() == ["correct"]
 
 
-@pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES)
-def test_specific_evaluation(language: str, tmp_path: Path, pytestconfig):
-    conf = configuration(
-        pytestconfig,
-        "echo-function",
-        language,
-        tmp_path,
-        "two-specific.tson",
-        "correct",
-    )
-    result = execute_config(conf)
-    updates = assert_valid_output(result, pytestconfig)
-    assert updates.find_status_enum() == ["wrong", "correct"]
-    assert len(updates.find_all("append-message")) == 2
-
-
-@pytest.mark.parametrize(
-    "lang",
-    [
-        "python",
-        "java",
-        "kotlin",
-        "csharp",
-        pytest.param("haskell", marks=pytest.mark.haskell),
-        pytest.param("runhaskell", marks=pytest.mark.haskell),
-    ],
-)
-def test_language_evaluator_exception_correct(lang: str, tmp_path: Path, pytestconfig):
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_generic_exception_wrong(
+    lang: str, tmp_path: Path, pytestconfig: pytest.Config
+):
     conf = configuration(
-        pytestconfig, "division", lang, tmp_path, "plan.json", "correct"
+        pytestconfig, "division", lang, tmp_path, "plan-generic-exception.json", "wrong"
     )
     result = execute_config(conf)
     updates = assert_valid_output(result, pytestconfig)
-    assert updates.find_status_enum() == ["correct"]
+    assert updates.find_status_enum() == ["wrong"]
 
 
-@pytest.mark.parametrize(
-    "lang",
-    [
-        "python",
-        "java",
-        "kotlin",
-        "csharp",
-        pytest.param("haskell", marks=pytest.mark.haskell),
-        pytest.param("runhaskell", marks=pytest.mark.haskell),
-    ],
-)
-def test_language_evaluator_generic_exception_correct(
-    lang: str, tmp_path: Path, pytestconfig
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_generic_exception_correct(
+    lang: str, tmp_path: Path, pytestconfig: pytest.Config
 ):
     conf = configuration(
         pytestconfig,
@@ -287,38 +257,9 @@ def test_language_evaluator_generic_exception_correct(
     assert updates.find_status_enum() == ["correct"]
 
 
-@pytest.mark.parametrize(
-    "lang",
-    [
-        "python",
-        "java",
-        "kotlin",
-        "csharp",
-        pytest.param("haskell", marks=pytest.mark.haskell),
-        pytest.param("runhaskell", marks=pytest.mark.haskell),
-    ],
-)
-def test_language_evaluator_exception_wrong(lang: str, tmp_path: Path, pytestconfig):
-    conf = configuration(pytestconfig, "division", lang, tmp_path, "plan.json", "wrong")
-    result = execute_config(conf)
-    updates = assert_valid_output(result, pytestconfig)
-    assert updates.find_status_enum() == ["wrong"]
-    assert len(updates.find_all("append-message")) == 1
-
-
-@pytest.mark.parametrize(
-    "lang",
-    [
-        "python",
-        "java",
-        "kotlin",
-        "csharp",
-        pytest.param("haskell", marks=pytest.mark.haskell),
-        pytest.param("runhaskell", marks=pytest.mark.haskell),
-    ],
-)
-def test_language_evaluator_generic_exception_wrong_error(
-    lang: str, tmp_path: Path, pytestconfig
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_generic_exception_wrong_error(
+    lang: str, tmp_path: Path, pytestconfig: pytest.Config
 ):
     conf = configuration(
         pytestconfig,
@@ -333,50 +274,6 @@ def test_language_evaluator_generic_exception_wrong_error(
     assert updates.find_status_enum() == ["wrong"]
 
 
-@pytest.mark.parametrize(
-    "lang",
-    [
-        "python",
-        "java",
-        "kotlin",
-        "csharp",
-        pytest.param("haskell", marks=pytest.mark.haskell),
-        pytest.param("runhaskell", marks=pytest.mark.haskell),
-    ],
-)
-def test_language_evaluator_exception_wrong_error(
-    lang: str, tmp_path: Path, pytestconfig
-):
-    conf = configuration(
-        pytestconfig, "division", lang, tmp_path, "plan.json", "wrong-error"
-    )
-    result = execute_config(conf)
-    updates = assert_valid_output(result, pytestconfig)
-    assert updates.find_status_enum() == ["wrong"]
-
-
-@pytest.mark.parametrize(
-    "lang",
-    [
-        "python",
-        "java",
-        "kotlin",
-        "csharp",
-        pytest.param("haskell", marks=pytest.mark.haskell),
-        pytest.param("runhaskell", marks=pytest.mark.haskell),
-    ],
-)
-def test_language_evaluator_generic_exception_wrong(
-    lang: str, tmp_path: Path, pytestconfig
-):
-    conf = configuration(
-        pytestconfig, "division", lang, tmp_path, "plan-generic-exception.json", "wrong"
-    )
-    result = execute_config(conf)
-    updates = assert_valid_output(result, pytestconfig)
-    assert updates.find_status_enum() == ["wrong"]
-
-
 @pytest.mark.parametrize("lang", ["python", "java", "kotlin", "csharp"])
 def test_assignment_and_use_in_expression(lang: str, tmp_path: Path, pytestconfig):
     conf = configuration(
diff --git a/tests/test_programmed_oracle.py b/tests/test_programmed_oracle.py
index ee7161e7..fc25ccb3 100644
--- a/tests/test_programmed_oracle.py
+++ b/tests/test_programmed_oracle.py
@@ -5,8 +5,8 @@
 from pathlib import Path
 
 import pytest
-from language_markers import ALL_LANGUAGES
 
+from tests.language_markers import ALL_LANGUAGES
 from tests.manual_utils import assert_valid_output, configuration, execute_config
 
 
diff --git a/tests/test_specific_oracle.py b/tests/test_specific_oracle.py
new file mode 100644
index 00000000..43c3df75
--- /dev/null
+++ b/tests/test_specific_oracle.py
@@ -0,0 +1,92 @@
+"""
+Testcases for language-specific oracles.
+"""
+
+from pathlib import Path
+
+import pytest
+
+from tests.language_markers import ALL_SPECIFIC_LANGUAGES, EXCEPTION_LANGUAGES
+from tests.manual_utils import assert_valid_output, configuration, execute_config
+
+
+@pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES)
+def test_specific_oracle_return(language: str, tmp_path: Path, pytestconfig):
+    conf = configuration(
+        pytestconfig,
+        "echo-function",
+        language,
+        tmp_path,
+        "two-specific.tson",
+        "correct",
+    )
+    result = execute_config(conf)
+    updates = assert_valid_output(result, pytestconfig)
+    assert updates.find_status_enum() == ["wrong", "correct"]
+    assert len(updates.find_all("append-message")) == 2
+
+
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_specific_oracle_exception_correct(
+    lang: str, tmp_path: Path, pytestconfig: pytest.Config
+):
+    conf = configuration(
+        pytestconfig, "division", lang, tmp_path, "plan.json", "correct"
+    )
+    result = execute_config(conf)
+    updates = assert_valid_output(result, pytestconfig)
+    assert updates.find_status_enum() == ["correct"]
+
+
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_specific_oracle_exception_wrong(
+    lang: str, tmp_path: Path, pytestconfig: pytest.Config
+):
+    conf = configuration(pytestconfig, "division", lang, tmp_path, "plan.json", "wrong")
+    result = execute_config(conf)
+    updates = assert_valid_output(result, pytestconfig)
+    assert updates.find_status_enum() == ["wrong"]
+    assert len(updates.find_all("append-message")) == 1
+
+
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_specific_oracle_exception_wrong_exception(
+    lang: str, tmp_path: Path, pytestconfig
+):
+    conf = configuration(
+        pytestconfig, "division", lang, tmp_path, "plan.json", "wrong-error"
+    )
+    result = execute_config(conf)
+    updates = assert_valid_output(result, pytestconfig)
+    assert updates.find_status_enum() == ["wrong"]
+
+
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_specific_oracle_exception_syntax_error(
+    lang: str, tmp_path: Path, pytestconfig: pytest.Config
+):
+    conf = configuration(
+        pytestconfig, "division", lang, tmp_path, "plan-syntax-error.json", "correct"
+    )
+    result = execute_config(conf)
+    updates = assert_valid_output(result, pytestconfig)
+    assert updates.find_status_enum() == ["compilation error"]
+    assert len(updates.find_all("append-message")) == 1
+
+
+@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES)
+def test_specific_oracle_exception_runtime_exception(
+    lang: str, tmp_path: Path, pytestconfig: pytest.Config
+):
+    conf = configuration(
+        pytestconfig,
+        "division",
+        lang,
+        tmp_path,
+        "plan-runtime-exception.json",
+        "correct",
+    )
+    result = execute_config(conf)
+    updates = assert_valid_output(result, pytestconfig)
+    assert updates.find_status_enum() == ["wrong", "wrong"]
+    assert len(updates.find_all("append-message")) >= 1