diff --git a/tested/dsl/schema.json b/tested/dsl/schema.json
index 7c67b536..cbde7921 100644
--- a/tested/dsl/schema.json
+++ b/tested/dsl/schema.json
@@ -1,152 +1,154 @@
 {
-  "$id": "https://github.com/dodona-edu/universal-judge/blob/master/tested/dsl/schema.yaml",
-  "$schema": "http://json-schema.org/draft-07/schema#",
-  "title": "DSL Schema",
-  "description": "DSL test suite for TESTed",
-  "oneOf": [
+  "$id" : "https://github.com/dodona-edu/universal-judge/blob/master/tested/dsl/schema.yaml",
+  "$schema" : "http://json-schema.org/draft-07/schema#",
+  "title" : "DSL Schema",
+  "description" : "DSL test suite for TESTed",
+  "oneOf" : [
     {
-      "$ref": "#/definitions/_rootObject"
+      "$ref" : "#/definitions/_rootObject"
     },
     {
-      "$ref": "#/definitions/tab"
+      "$ref" : "#/definitions/tab"
     },
     {
-      "$ref": "#/definitions/_tabList"
+      "$ref" : "#/definitions/_tabList"
     }
   ],
-  "definitions": {
-    "_tabList": {
-      "type": "array",
-      "minItems": 1,
-      "items": {
-        "$ref": "#/definitions/tab"
+  "definitions" : {
+    "_tabList" : {
+      "type" : "array",
+      "minItems" : 1,
+      "items" : {
+        "$ref" : "#/definitions/tab"
       }
     },
-    "_contextList": {
-      "type": "array",
-      "minItems": 1,
-      "items": {
-        "$ref": "#/definitions/context"
+    "_contextList" : {
+      "type" : "array",
+      "minItems" : 1,
+      "items" : {
+        "$ref" : "#/definitions/context"
      }
     },
-    "_testcaseList": {
-      "type": "array",
-      "minItems": 1,
-      "items": {
-        "$ref": "#/definitions/testcase"
+    "_testcaseList" : {
+      "type" : "array",
+      "minItems" : 1,
+      "items" : {
+        "$ref" : "#/definitions/testcase"
       }
     },
-    "_rootObject": {
-      "type": "object",
-      "properties": {
-        "config": {
-          "$ref": "#/definitions/globalConfig",
-          "description": "Configuration applicable to the whole test suite."
-        },
-        "namespace": {
-          "type": "string",
-          "description": "Namespace of the submitted solution, in `snake_case`"
-        },
-        "tabs": {
-          "$ref": "#/definitions/_tabList"
+    "_rootObject" : {
+      "type" : "object",
+      "properties" : {
+        "config" : {
+          "$ref" : "#/definitions/globalConfig",
+          "description" : "Configuration applicable to the whole test suite."
+        },
+        "namespace" : {
+          "type" : "string",
+          "description" : "Namespace of the submitted solution, in `snake_case`"
+        },
+        "tabs" : {
+          "$ref" : "#/definitions/_tabList"
         }
       },
-      "required": [
+      "required" : [
         "tabs"
       ]
     },
-    "tab": {
-      "type": "object",
-      "properties": {
-        "config": {
-          "$ref": "#/definitions/globalConfig",
-          "description": "Configuration applicable to this tab"
-        },
-        "hidden": {
-          "type": "boolean",
-          "description": "Defines if the tab is hidden for the student or not"
-        },
-        "tab": {
-          "type": "string",
-          "description": "The name of this tab."
-        },
-        "contexts": {
-          "$ref": "#/definitions/_contextList"
-        },
-        "testcases": {
-          "$ref": "#/definitions/_testcaseList"
+    "tab" : {
+      "type" : "object",
+      "properties" : {
+        "config" : {
+          "$ref" : "#/definitions/globalConfig",
+          "description" : "Configuration applicable to this tab"
+        },
+        "hidden" : {
+          "type" : "boolean",
+          "description" : "Defines if the tab is hidden for the student or not"
+        },
+        "tab" : {
+          "type" : "string",
+          "description" : "The name of this tab."
+        },
+        "contexts" : {
+          "$ref" : "#/definitions/_contextList"
+        },
+        "testcases" : {
+          "$ref" : "#/definitions/_testcaseList"
         }
       },
-      "oneOf": [
+      "oneOf" : [
         {
-          "required": [
+          "required" : [
             "contexts",
             "tab"
           ]
         },
         {
-          "required": [
+          "required" : [
             "testcases",
             "tab"
           ]
         }
       ]
     },
-    "context": {
-      "type": "object",
-      "properties": {
-        "config": {
-          "$ref": "#/definitions/globalConfig",
-          "description": "Configuration settings at context level"
-        },
-        "context": {
-          "type": "string",
-          "description": "Description of this context."
-        },
-        "testcases": {
-          "$ref": "#/definitions/_testcaseList"
+    "context" : {
+      "type" : "object",
+      "properties" : {
+        "config" : {
+          "$ref" : "#/definitions/globalConfig",
+          "description" : "Configuration settings at context level"
+        },
+        "context" : {
+          "type" : "string",
+          "description" : "Description of this context."
+        },
+        "testcases" : {
+          "$ref" : "#/definitions/_testcaseList"
         }
       },
-      "required": [
+      "required" : [
        "testcases"
       ]
     },
-    "testcase": {
-      "type": "object",
-      "description": "An individual test for a statement or expression",
-      "properties": {
-        "exception": {
-          "description": "Expected exception message",
+    "testcase" : {
+      "type" : "object",
+      "description" : "An individual test for a statement or expression",
+      "properties" : {
+        "exception" : {
+          "description" : "Expected exception message",
           "oneOf" : [
             {
-              "$ref": "#/definitions/generalOutput"
+              "type" : "string"
             },
             {
               "type" : "object",
-              "required" : ["types"],
+              "required" : [
+                "types"
+              ],
               "properties" : {
-                "message": {
-                  "$ref": "#/definitions/generalOutput"
+                "message" : {
+                  "type" : "string"
                 },
-                "types": {
+                "types" : {
                   "type" : "object",
-                  "items": {
-                    "type": "string"
+                  "items" : {
+                    "type" : "string"
                   }
                 }
               }
             }
           ]
         },
-        "files": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/file"
+        "files" : {
+          "type" : "array",
+          "items" : {
+            "$ref" : "#/definitions/file"
           }
         },
-        "return": {
-          "description": "Expected return value",
-          "type": [
+        "return" : {
+          "description" : "Expected return value",
+          "type" : [
             "array",
             "boolean",
             "integer",
@@ -156,166 +158,271 @@
             "string"
           ]
         },
-        "return_raw": {
-          "description": "Value string to parse to the expected return value",
-          "$ref": "#/definitions/generalOutput"
+        "return_raw" : {
+          "description" : "Value string to parse to the expected return value",
+          "$ref" : "#/definitions/advancedValueOutputChannel"
         },
-        "statement": {
-          "description": "Statement or expression to evaluate",
-          "$ref": "#/definitions/textualTypes"
+        "statement" : {
+          "description" : "Statement or expression to evaluate",
+          "type" : "string"
         },
-        "expression": {
-          "description": "Statement or expression to evaluate",
-          "$ref": "#/definitions/textualTypes"
+        "expression" : {
+          "description" : "Statement or expression to evaluate",
+          "type" : "string"
        },
-        "stderr": {
-          "description": "Expected output at stderr",
-          "$ref": "#/definitions/streamOutput"
+        "stderr" : {
+          "description" : "Expected output at stderr",
+          "$ref" : "#/definitions/textOutputChannel"
         },
-        "stdout": {
-          "description": "Expected output at stdout",
-          "$ref": "#/definitions/streamOutput"
+        "stdout" : {
+          "description" : "Expected output at stdout",
+          "$ref" : "#/definitions/textOutputChannel"
         },
-        "config": {
-          "$ref": "#/definitions/globalConfig",
-          "description": "Configuration settings at testcase level"
+        "config" : {
+          "$ref" : "#/definitions/globalConfig",
+          "description" : "Configuration settings at testcase level"
         },
-        "stdin": {
-          "description": "Stdin for this context",
-          "$ref": "#/definitions/textualTypes"
-        },
-        "exitCode": {
-          "type": "integer",
-          "description": "Expected exit code for the run"
+        "stdin" : {
+          "description" : "Stdin for this context",
+          "type" : [
+            "string",
+            "number",
+            "integer",
+            "boolean"
+          ]
         },
-        "arguments": {
-          "type": "array",
-          "description": "Array of program call arguments",
-          "items": {
-            "$ref": "#/definitions/textualTypes"
+        "exitCode" : {
+          "type" : "integer",
+          "description" : "Expected exit code for the run"
+        },
+        "arguments" : {
+          "type" : "array",
+          "description" : "Array of program call arguments",
+          "items" : {
+            "type" : [
+              "string",
+              "number",
+              "integer",
+              "boolean"
+            ]
           }
         }
       }
     },
-    "configText": {
-      "type": "object",
-      "description": "Configuration properties for textual comparison and to configure if the expected value should be hidden or not",
-      "minProperties": 1,
-      "properties": {
-        "applyRounding": {
-          "description": "Apply rounding when comparing as float",
-          "type": "boolean"
-        },
-        "caseInsensitive": {
-          "description": "Ignore case when comparing strings",
-          "type": "boolean"
-        },
-        "ignoreWhitespace": {
-          "description": "Ignore leading and trailing whitespace",
-          "type": "boolean"
-        },
-        "roundTo": {
-          "description": "The number of decimals to round at, when applying the rounding on floats",
-          "type": "integer"
-        },
-        "tryFloatingPoint": {
-          "description": "Try comparing text as floating point numbers",
-          "type": "boolean"
+    "textOutputChannel" : {
+      "anyOf" : [
+        {
+          "description" : "A simple value which is converted into a string.",
+          "type" : [
+            "string",
+            "number",
+            "integer",
+            "boolean"
+          ]
         },
-        "hideExpected": {
-          "description": "Hide the expected value in feedback (default: false), not recommended to use!",
-          "type": "boolean"
+        {
+          "$ref" : "#/definitions/advancedTextOutputChannel"
         }
-      }
+      ]
     },
-    "localConfigGeneral": {
-      "type": "object",
-      "description": "General configuration properties for one test",
-      "required": [
+    "advancedTextOutputChannel" : {
+      "type" : "object",
+      "description" : "Advanced output for a text output channel, such as stdout or stderr.",
+      "required" : [
         "data"
       ],
-      "properties": {
-        "data": {
-          "$ref": "#/definitions/textualTypes"
-        }
-      }
-    },
-    "localConfigText": {
-      "type": "object",
-      "description": "Textual configuration properties of one test",
-      "required": [
-        "data",
-        "config"
-      ],
-      "properties": {
-        "data": {
-          "$ref": "#/definitions/textualTypes"
+      "properties" : {
+        "data" : {
+          "description" : "The expected data types.",
+          "type" : [
+            "string",
+            "number",
+            "integer",
+            "boolean"
+          ]
         },
-        "config": {
-          "$ref": "#/definitions/configText"
+        "config" : {
+          "$ref" : "#/definitions/textConfigurationOptions"
         }
-      }
-    },
-    "globalConfig": {
-      "type": "object",
-      "description": "Global configuration properties",
-      "minProperties": 1,
-      "properties": {
-        "stdout": {
-          "$ref": "#/definitions/configText"
+      },
+      "oneOf" : [
+        {
+          "properties" : {
+            "evaluator" : {
+              "type" : "string",
+              "enum" : [
+                "builtin"
+              ]
+            }
+          }
        },
-        "stderr": {
-          "$ref": "#/definitions/configText"
+        {
+          "required" : [
+            "evaluator",
+            "language",
+            "file"
+          ],
+          "properties" : {
+            "evaluator" : {
+              "type" : "string",
+              "enum" : [
+                "custom"
+              ]
+            },
+            "language" : {
+              "type" : "string",
+              "description" : "The programming language of the custom check function."
+            },
+            "file" : {
+              "type" : "string",
+              "description" : "The path to the file containing the custom check function."
+            },
+            "name" : {
+              "type" : "string",
+              "description" : "The name of the custom check function.",
+              "default" : "evaluate"
+            },
+            "arguments" : {
+              "type" : "array",
+              "description" : "List of 'Python' values to use as arguments to the function.",
+              "items" : {
+                "type" : "string"
+              }
+            }
+          }
         }
-      }
+      ]
     },
-    "streamOutput": {
-      "anyOf": [
+    "advancedValueOutputChannel" : {
+      "oneOf" : [
         {
-          "$ref": "#/definitions/textualTypes"
+          "type" : "string",
+          "description" : "A 'Python' value to parse and use as the expected type."
         },
         {
-          "$ref": "#/definitions/localConfigText"
+          "type" : "object",
+          "description" : "A custom check function.",
+          "required" : [
+            "value"
+          ],
+          "properties" : {
+            "value" : {
+              "type" : "string",
+              "description" : "The expected value."
+            }
+          },
+          "oneOf" : [
+            {
+              "properties" : {
+                "evaluator" : {
+                  "type" : "string",
+                  "enum" : [
+                    "builtin"
+                  ]
+                }
+              }
+            },
+            {
+              "required" : [
+                "evaluator",
+                "language",
+                "file"
+              ],
+              "properties" : {
+                "evaluator" : {
+                  "type" : "string",
+                  "enum" : [
+                    "custom"
+                  ]
+                },
+                "language" : {
+                  "type" : "string",
+                  "description" : "The programming language of the custom check function."
+                },
+                "file" : {
+                  "type" : "string",
+                  "description" : "The path to the file containing the custom check function."
+                },
+                "name" : {
+                  "type" : "string",
+                  "description" : "The name of the custom check function.",
+                  "default" : "evaluate"
+                },
+                "arguments" : {
+                  "type" : "array",
+                  "description" : "List of 'Python' values to use as arguments to the function.",
+                  "items" : {
+                    "type" : "string"
+                  }
+                }
+              }
+            }
+          ]
         }
       ]
     },
-    "generalOutput": {
-      "$ref": "#/definitions/textualTypes"
+    "textConfigurationOptions" : {
+      "type" : "object",
+      "description" : "Configuration properties for textual comparison and to configure if the expected value should be hidden or not",
+      "minProperties" : 1,
+      "properties" : {
+        "applyRounding" : {
+          "description" : "Apply rounding when comparing as float",
+          "type" : "boolean"
+        },
+        "caseInsensitive" : {
+          "description" : "Ignore case when comparing strings",
+          "type" : "boolean"
+        },
+        "ignoreWhitespace" : {
+          "description" : "Ignore leading and trailing whitespace",
+          "type" : "boolean"
+        },
+        "roundTo" : {
+          "description" : "The number of decimals to round at, when applying the rounding on floats",
+          "type" : "integer"
+        },
+        "tryFloatingPoint" : {
+          "description" : "Try comparing text as floating point numbers",
+          "type" : "boolean"
+        },
+        "hideExpected" : {
+          "description" : "Hide the expected value in feedback (default: false), not recommended to use!",
+          "type" : "boolean"
+        }
+      }
+    },
+    "globalConfig" : {
+      "type" : "object",
+      "description" : "Global configuration properties",
+      "minProperties" : 1,
+      "properties" : {
+        "stdout" : {
+          "$ref" : "#/definitions/textConfigurationOptions"
+        },
+        "stderr" : {
+          "$ref" : "#/definitions/textConfigurationOptions"
+        }
+      }
     },
-    "file": {
-      "type": "object",
-      "description": "Tab definition with testcases",
-      "required": [
+    "file" : {
+      "type" : "object",
+      "description" : "Path to a file for input.",
+      "required" : [
         "name",
         "url"
       ],
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "File name"
-        },
-        "url": {
-          "type": "string",
-          "format": "uri",
-          "description": "Relative path to the file in the `description` folder of a Dodona exercise"
+      "properties" : {
+        "name" : {
+          "type" : "string",
+          "description" : "File name"
+        },
+        "url" : {
+          "type" : "string",
+          "format" : "uri",
+          "description" : "Relative path to the file in the `description` folder of a Dodona exercise"
         }
       }
-    },
-    "textualTypes": {
-      "anyOf": [
-        {
-          "type": "boolean"
-        },
-        {
-          "type": "integer"
-        },
-        {
-          "type": "number"
-        },
-        {
-          "type": "string"
-        }
-      ]
     }
   }
 }
diff --git a/tested/dsl/translate_parser.py b/tested/dsl/translate_parser.py
index ee80f4e2..ed96c92d 100644
--- a/tested/dsl/translate_parser.py
+++ b/tested/dsl/translate_parser.py
@@ -28,6 +28,7 @@ from tested.testsuite import (
     Context,
     EmptyChannel,
+    EvaluationFunction,
     ExceptionOutputChannel,
     ExitCodeOutputChannel,
     ExpectedException,
@@ -35,6 +36,7 @@
     GenericTextEvaluator,
     MainInput,
     Output,
+    ProgrammedEvaluator,
     Suite,
     Tab,
     Testcase,
@@ -143,19 +145,58 @@ def _convert_file(link_file: YamlDict) -> FileUrl:
     return FileUrl(name=link_file["name"], url=link_file["url"])
 
 
+def _convert_programmed_evaluator(stream: dict) -> ProgrammedEvaluator:
+    return ProgrammedEvaluator(
+        language=stream["language"],
+        function=EvaluationFunction(
+            file=stream["file"], name=stream.get("name", "evaluate")
+        ),
+        arguments=[
+            parse_string(v, is_return=True) for v in stream.get("arguments", [])
+        ],
+    )
+
+
 def _convert_text_output_channel(
     stream: YamlObject, config: dict, config_name: str
 ) -> TextOutputChannel:
     if isinstance(stream, str):
         data = stream
         config = config.get(config_name, {})
+        return TextOutputChannel(
+            data=data, evaluator=GenericTextEvaluator(options=config)
+        )
     else:
         assert isinstance(stream, dict)
-        data = stream["data"]
-        existing_config = config.get(config_name, {})
-        config = _deepen_config_level(stream, existing_config)
+        if "evaluator" not in stream or stream["evaluator"] == "builtin":
+            data = stream["data"]
+            existing_config = config.get(config_name, {})
+            config = _deepen_config_level(stream, existing_config)
+            return TextOutputChannel(
+                data=data, evaluator=GenericTextEvaluator(options=config)
+            )
+        elif stream["evaluator"] == "custom":
+            return TextOutputChannel(
+                data=stream["data"], evaluator=_convert_programmed_evaluator(stream)
+            )
+        raise TypeError(f"Unknown text evaluator type: {stream['evaluator']}")
 
-    return TextOutputChannel(data=data, evaluator=GenericTextEvaluator(options=config))
+
+def _convert_advanced_value_output_channel(stream: YamlObject) -> ValueOutputChannel:
+    if isinstance(stream, str):
+        value = parse_string(stream, is_return=True)
+        return ValueOutputChannel(value=value)
+    else:
+        assert isinstance(stream, dict)
+        if "evaluator" not in stream or stream["evaluator"] == "builtin":
+            value = parse_string(stream["value"], is_return=True)
+            return ValueOutputChannel(value=value)
+        elif stream["evaluator"] == "custom":
+            return ValueOutputChannel(
+                value=parse_string(stream["value"], is_return=True),
+                evaluator=_convert_programmed_evaluator(stream),
+            )
+        raise TypeError(f"Unknown value evaluator type: {stream['evaluator']}")
 
 
 def _convert_testcase(testcase: YamlDict, previous_config: dict) -> Testcase:
@@ -183,6 +224,7 @@ def _convert_testcase(testcase: YamlDict, previous_config: dict) -> Testcase:
             message = exception
             types = None
         else:
+            assert isinstance(exception, dict)
             message = exception.get("message")
             types = exception["types"]
         output.exception = ExceptionOutputChannel(
@@ -197,7 +239,7 @@
     if (result := testcase.get("return_raw")) is not None:
         if "return" in testcase:
             raise ValueError("Both a return and return_raw value is not allowed.")
-        output.result = ValueOutputChannel(value=parse_string(result, True))
+        output.result = _convert_advanced_value_output_channel(result)
 
     # TODO: allow propagation of files...
     files = []
diff --git a/tests/test_dsl_yaml.py b/tests/test_dsl_yaml.py
index 30bc0d3a..147cf1dc 100644
--- a/tests/test_dsl_yaml.py
+++ b/tests/test_dsl_yaml.py
@@ -1,3 +1,5 @@
+from pathlib import Path
+
 import pytest
 
 from tested.datatypes import (
@@ -6,10 +8,25 @@
     BasicNumericTypes,
     BasicObjectTypes,
     BasicSequenceTypes,
+    BasicStringTypes,
 )
 from tested.dsl import translate_to_test_suite
-from tested.serialisation import Assignment, FunctionCall, ObjectType, SequenceType
-from tested.testsuite import GenericTextEvaluator, parse_test_suite
+from tested.serialisation import (
+    Assignment,
+    FunctionCall,
+    NumberType,
+    ObjectType,
+    SequenceType,
+    StringType,
+)
+from tested.testsuite import (
+    GenericTextEvaluator,
+    GenericValueEvaluator,
+    ProgrammedEvaluator,
+    TextOutputChannel,
+    ValueOutputChannel,
+    parse_test_suite,
+)
 
 
 def test_parse_one_tab_ctx():
@@ -493,3 +510,189 @@ def test_empty_constructor_with_param(function_name, result):
     assert isinstance(test.input, FunctionCall)
     assert test.output.result.value.type == result
     assert len(test.output.result.value.data) == 0
+
+
+def test_text_built_in_checks_implied():
+    yaml_str = f"""
+    - tab: 'Test'
+      contexts:
+        - testcases:
+            - statement: 'test()'
+              stdout:
+                data: "hallo"
+    """
+    json_str = translate_to_test_suite(yaml_str)
+    suite = parse_test_suite(json_str)
+    assert len(suite.tabs) == 1
+    tab = suite.tabs[0]
+    assert len(tab.contexts) == 1
+    testcases = tab.contexts[0].testcases
+    assert len(testcases) == 1
+    test = testcases[0]
+    assert isinstance(test.input, FunctionCall)
+    assert isinstance(test.output.stdout, TextOutputChannel)
+    assert isinstance(test.output.stdout.evaluator, GenericTextEvaluator)
+    assert test.output.stdout.data == "hallo"
+
+
+def test_text_built_in_checks_explicit():
+    yaml_str = f"""
+    - tab: 'Test'
+      contexts:
+        - testcases:
+            - statement: 'test()'
+              stdout:
+                data: "hallo"
+                evaluator: "builtin"
+    """
+    json_str = translate_to_test_suite(yaml_str)
+    suite = parse_test_suite(json_str)
+    assert len(suite.tabs) == 1
+    tab = suite.tabs[0]
+    assert len(tab.contexts) == 1
+    testcases = tab.contexts[0].testcases
+    assert len(testcases) == 1
+    test = testcases[0]
+    assert isinstance(test.input, FunctionCall)
+    assert isinstance(test.output.stdout, TextOutputChannel)
+    assert isinstance(test.output.stdout.evaluator, GenericTextEvaluator)
+    assert test.output.stdout.data == "hallo"
+
+
+def test_text_custom_checks_correct():
+    yaml_str = f"""
+    - tab: 'Test'
+      contexts:
+        - testcases:
+            - statement: 'test()'
+              stdout:
+                data: "hallo"
+                evaluator: "custom"
+                language: "python"
+                file: "test.py"
+                name: "evaluate_test"
+                arguments: ["'yes'", "5", "set([5, 5])"]
+    """
+    json_str = translate_to_test_suite(yaml_str)
+    suite = parse_test_suite(json_str)
+    assert len(suite.tabs) == 1
+    tab = suite.tabs[0]
+    assert len(tab.contexts) == 1
+    testcases = tab.contexts[0].testcases
+    assert len(testcases) == 1
+    test = testcases[0]
+    assert isinstance(test.input, FunctionCall)
+    assert isinstance(test.output.stdout, TextOutputChannel)
+    assert isinstance(test.output.stdout.evaluator, ProgrammedEvaluator)
+    assert test.output.stdout.data == "hallo"
+    evaluator = test.output.stdout.evaluator
+    assert evaluator.language == "python"
+    assert evaluator.function.name == "evaluate_test"
+    assert evaluator.function.file == Path("test.py")
+    assert evaluator.arguments == [
+        StringType(type=BasicStringTypes.TEXT, data="yes"),
+        NumberType(type=BasicNumericTypes.INTEGER, data=5),
+        SequenceType(
+            type=BasicSequenceTypes.SET,
+            data=[
+                NumberType(type=BasicNumericTypes.INTEGER, data=5),
+                NumberType(type=BasicNumericTypes.INTEGER, data=5),
+            ],
+        ),
+    ]
+
+
+def test_value_built_in_checks_implied():
+    yaml_str = f"""
+    - tab: 'Test'
+      contexts:
+        - testcases:
+            - statement: 'test()'
+              return_raw:
+                value: "'hallo'"
+    """
+    json_str = translate_to_test_suite(yaml_str)
+    suite = parse_test_suite(json_str)
+    assert len(suite.tabs) == 1
+    tab = suite.tabs[0]
+    assert len(tab.contexts) == 1
+    testcases = tab.contexts[0].testcases
+    assert len(testcases) == 1
+    test = testcases[0]
+    assert isinstance(test.input, FunctionCall)
+    assert isinstance(test.output.result, ValueOutputChannel)
+    assert isinstance(test.output.result.evaluator, GenericValueEvaluator)
+    assert test.output.result.value == StringType(
+        type=BasicStringTypes.TEXT, data="hallo"
+    )
+
+
+def test_value_built_in_checks_explicit():
+    yaml_str = f"""
+    - tab: 'Test'
+      contexts:
+        - testcases:
+            - statement: 'test()'
+              return_raw:
+                value: "'hallo'"
+                evaluator: "builtin"
+    """
+    json_str = translate_to_test_suite(yaml_str)
+    suite = parse_test_suite(json_str)
+    assert len(suite.tabs) == 1
+    tab = suite.tabs[0]
+    assert len(tab.contexts) == 1
+    testcases = tab.contexts[0].testcases
+    assert len(testcases) == 1
+    test = testcases[0]
+    assert isinstance(test.input, FunctionCall)
+    assert isinstance(test.output.result, ValueOutputChannel)
+    assert isinstance(test.output.result.evaluator, GenericValueEvaluator)
+    assert test.output.result.value == StringType(
+        type=BasicStringTypes.TEXT, data="hallo"
+    )
+
+
+def test_value_custom_checks_correct():
+    yaml_str = f"""
+    - tab: 'Test'
+      contexts:
+        - testcases:
+            - statement: 'test()'
+              return_raw:
+                value: "'hallo'"
+                evaluator: "custom"
+                language: "python"
+                file: "test.py"
+                name: "evaluate_test"
+                arguments: ["'yes'", "5", "set([5, 5])"]
+    """
+    json_str = translate_to_test_suite(yaml_str)
+    suite = parse_test_suite(json_str)
+    assert len(suite.tabs) == 1
+    tab = suite.tabs[0]
+    assert len(tab.contexts) == 1
+    testcases = tab.contexts[0].testcases
+    assert len(testcases) == 1
+    test = testcases[0]
+    assert isinstance(test.input, FunctionCall)
+    assert isinstance(test.output.result, ValueOutputChannel)
+    assert isinstance(test.output.result.evaluator, ProgrammedEvaluator)
+    assert test.output.result.value == StringType(
+        type=BasicStringTypes.TEXT, data="hallo"
+    )
+    evaluator = test.output.result.evaluator
+    assert evaluator.language == "python"
+    assert evaluator.function.name == "evaluate_test"
+    assert evaluator.function.file == Path("test.py")
+    assert evaluator.arguments == [
+        StringType(type=BasicStringTypes.TEXT, data="yes"),
+        NumberType(type=BasicNumericTypes.INTEGER, data=5),
+        SequenceType(
+            type=BasicSequenceTypes.SET,
+            data=[
+                NumberType(type=BasicNumericTypes.INTEGER, data=5),
+                NumberType(type=BasicNumericTypes.INTEGER, data=5),
+            ],
+        ),
+    ]
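
Usage note: taken together, the schema and parser changes above let a DSL suite attach a custom check function to a text channel (stdout/stderr) or to a return_raw value, while plain strings keep the old behaviour. A minimal YAML sketch of the new surface, reusing the placeholder names from the new tests (test.py and evaluate_test are test fixtures, not shipped files):

    - tab: 'Test'
      contexts:
        - testcases:
            # Custom check on a text channel: the function in test.py is
            # called instead of the builtin text comparison.
            - statement: 'test()'
              stdout:
                data: "hallo"
                evaluator: "custom"
                language: "python"
                file: "test.py"
                name: "evaluate_test"
                arguments: ["'yes'", "5", "set([5, 5])"]
            # Custom check on a parsed return value.
            - statement: 'test()'
              return_raw:
                value: "'hallo'"
                evaluator: "custom"
                language: "python"
                file: "test.py"

Omitting evaluator, or setting evaluator: "builtin", keeps the existing checks: GenericTextEvaluator for stdout/stderr and GenericValueEvaluator for return_raw, as the *_built_in_checks_* tests assert.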
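
The builtin path of the advanced text form still accepts the comparison options now grouped under textConfigurationOptions in the schema. A sketch, assuming the existing option semantics are unchanged (the option values here are illustrative only):

    - tab: 'Test'
      contexts:
        - testcases:
            - statement: 'test()'
              stdout:
                data: "3.14"
                evaluator: "builtin"
                config:
                  tryFloatingPoint: true
                  applyRounding: true
                  roundTo: 2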