From 9e50541ecdb992999931a088a90d8e8f7bbb8c6b Mon Sep 17 00:00:00 2001
From: Niko Strijbol
Date: Wed, 6 Sep 2023 17:15:17 +0200
Subject: [PATCH] Allow merging across tabs

---
 tested/configs.py                      |   4 +-
 tested/descriptions/__init__.py        |   2 +-
 tested/descriptions/converters.py      |   2 +-
 tested/descriptions/renderer.py        |   4 +-
 tested/judge/compilation.py            |  12 +-
 tested/judge/core.py                   | 355 +++++++++++++------------
 tested/judge/execution.py              | 282 +++++++++-----------
 tested/judge/linter.py                 |   2 +-
 tested/judge/planning.py               |  63 +++++
 tested/judge/programmed.py             |  14 +-
 tested/languages/c/generators.py       |  40 +--
 tested/languages/config.py             |  26 +-
 tested/languages/conventionalize.py    |   5 +-
 tested/languages/csharp/config.py      |  13 +-
 tested/languages/csharp/generators.py  |   6 +-
 tested/languages/generation.py         |  51 ++--
 tested/languages/haskell/generators.py |   8 +-
 tested/languages/java/generators.py    |   6 +-
 tested/languages/kotlin/config.py      |  12 +-
 tested/languages/kotlin/generators.py  |   4 +-
 tested/languages/preparation.py        |  57 ++--
 tested/languages/python/generators.py  |   4 +-
 tested/languages/utils.py              |  16 +-
 tested/oracles/common.py               |   2 +-
 tested/oracles/exception.py            |  10 +-
 tested/oracles/value.py                |   2 +-
 tests/test_functionality.py            |   3 +-
 tests/test_serialisation.py            |  22 +-
 28 files changed, 539 insertions(+), 488 deletions(-)
 create mode 100644 tested/judge/planning.py

diff --git a/tested/configs.py b/tested/configs.py
index e84054e3..f924973c 100644
--- a/tested/configs.py
+++ b/tested/configs.py
@@ -125,7 +125,7 @@ def options(self) -> Options:
 class Bundle:
     """A bundle of arguments and configs for running everything."""
 
-    lang_config: "Language"
+    language: "Language"
     global_config: GlobalConfig
     out: IO
 
@@ -194,4 +194,4 @@ def create_bundle(
         suite=suite,
     )
     lang_config = langs.get_language(global_config, language)
-    return Bundle(lang_config=lang_config, global_config=global_config, out=output)
+    return Bundle(language=lang_config, global_config=global_config, out=output)
diff --git a/tested/descriptions/__init__.py b/tested/descriptions/__init__.py
index 10a9e0ef..87f4abf9 100644
--- a/tested/descriptions/__init__.py
+++ b/tested/descriptions/__init__.py
@@ -34,7 +34,7 @@ def process_problem_statement(
 
     bundle = Bundle(
         global_config=global_config,
-        lang_config=language,
+        language=language,
         out=open(os.devnull, "w"),
     )
 
diff --git a/tested/descriptions/converters.py b/tested/descriptions/converters.py
index c6305f7c..689ac7e4 100644
--- a/tested/descriptions/converters.py
+++ b/tested/descriptions/converters.py
@@ -48,7 +48,7 @@ def convert_templated_problem(bundle: Bundle, raw_description: str) -> str:
     description_template = Template(
         source=raw_description, autoescape=False, keep_trailing_newline=True
     )
-    language = bundle.lang_config
+    language = bundle.language
     set_locale(bundle.config.natural_language)
     return description_template.render(
         # Conventionalize functions
diff --git a/tested/descriptions/renderer.py b/tested/descriptions/renderer.py
index 35581117..820489fa 100644
--- a/tested/descriptions/renderer.py
+++ b/tested/descriptions/renderer.py
@@ -20,7 +20,7 @@ def render_one_statement(bundle: Bundle, statement: str) -> str:
     parsed_string = parse_string(statement)
     generated_statement = generate_statement(bundle, parsed_string)
     # Allow the language to modify the template a bit.
-    return bundle.lang_config.cleanup_description(generated_statement)
+    return bundle.language.cleanup_description(generated_statement)
 
 
 class TestedRenderer(MarkdownRenderer):
@@ -40,7 +40,7 @@ def _render_doctest(self, element: block.FencedCode) -> str:
         doctests = self._doctest_parser.get_examples(raw_code)
         resulting_lines = []
 
-        prompt = self.bundle.lang_config.get_declaration_metadata().get("prompt", ">")
+        prompt = self.bundle.language.get_declaration_metadata().get("prompt", ">")
 
         # Both the doctests and the results are parsed as values in the DSL.
         for examples in doctests:
diff --git a/tested/judge/compilation.py b/tested/judge/compilation.py
index 48365296..9cc44816 100644
--- a/tested/judge/compilation.py
+++ b/tested/judge/compilation.py
@@ -6,8 +6,9 @@
 from typing import List, Optional, Tuple, Union
 
 from tested.configs import Bundle
-from tested.dodona import AnnotateCode, Message, Status
+from tested.dodona import Status
 from tested.internationalization import get_i18n_string
+from tested.judge.planning import CompilationResult
 from tested.judge.utils import BaseExecutionResult, run_command
 from tested.languages.config import FileFilter, Language
 from tested.languages.utils import convert_stacktrace_to_clickable_feedback
@@ -53,7 +54,7 @@ def run_compilation(
     decide to fallback to individual mode if the compilation result is not positive.
     """
 
-    command, files = bundle.lang_config.compilation(dependencies)
+    command, files = bundle.language.compilation(dependencies)
     _logger.debug(
         "Generating files with command %s in directory %s", command, directory
     )
@@ -64,25 +65,24 @@
 def process_compile_results(
     language_config: Language, results: Optional[BaseExecutionResult]
-) -> Tuple[List[Message], Status, List[AnnotateCode]]:
+) -> CompilationResult:
     """
     Process the output of a compilation step.
 
     It will convert the result of the command into a list of messages and a
     status. If the status is not correct, the messages and status may be passed
     to Dodona unchanged. Alternatively, they can be kept to show them with the
     first context.
     """
-    messages = []
     # There was no compilation
     if results is None:
-        return messages, Status.CORRECT, []
+        return [], Status.CORRECT, []
 
     show_stdout = False
     _logger.debug("Received stderr from compiler: " + results.stderr)
     compiler_messages, annotations, stdout, stderr = language_config.compiler_output(
         results.stdout, results.stderr
     )
-    messages.extend(compiler_messages)
+    messages = compiler_messages
     shown_messages = annotations or compiler_messages
 
     # Report stderr.
diff --git a/tested/judge/core.py b/tested/judge/core.py index 50a24171..10b642d7 100644 --- a/tested/judge/core.py +++ b/tested/judge/core.py @@ -2,7 +2,6 @@ import shutil import time from pathlib import Path -from typing import List, Optional, Tuple from tested.configs import Bundle from tested.dodona import ( @@ -23,20 +22,23 @@ from tested.judge.compilation import process_compile_results, run_compilation from tested.judge.evaluation import evaluate_context_results, terminate from tested.judge.execution import ( - Execution, ExecutionResult, - execute_execution, - merge_contexts_into_units, + compile_unit, + execute_unit, + filter_files, + plan_test_suite, + set_up_unit, ) from tested.judge.linter import run_linter -from tested.judge.utils import copy_from_paths_to_path -from tested.languages.conventionalize import ( - EXECUTION_PREFIX, - execution_name, - submission_file, +from tested.judge.planning import ( + CompilationResult, + ExecutionPlan, + PlannedContext, + PlannedExecutionUnit, ) +from tested.judge.utils import copy_from_paths_to_path +from tested.languages.conventionalize import EXECUTION_PREFIX, submission_file from tested.languages.generation import generate_execution, generate_selector -from tested.testsuite import ExecutionMode _logger = logging.getLogger(__name__) @@ -52,7 +54,7 @@ def judge(bundle: Bundle): # Begin by checking if the given test suite is executable in this language. _logger.info("Checking supported features...") set_locale(bundle.config.natural_language) - if not is_supported(bundle.lang_config): + if not is_supported(bundle.language): report_update(bundle.out, StartJudgement()) report_update( bundle.out, @@ -70,7 +72,6 @@ def judge(bundle: Bundle): _logger.info("Required features not supported.") return # Not all required features are supported. - mode = bundle.config.options.mode collector = OutputManager(bundle.out) collector.add(StartJudgement()) @@ -78,69 +79,86 @@ def judge(bundle: Bundle): start = time.perf_counter() # Run the linter. + # TODO: move to the back? Or at least limit the time. run_linter(bundle, collector, max_time) if time.perf_counter() - start > max_time: terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED) return - _logger.info("Start generating code...") - common_dir, files, selector = _generate_files(bundle, mode) - # Add the selector to the dependencies. - if selector: - files.append(selector) - - if mode == ExecutionMode.PRECOMPILATION: - assert not bundle.lang_config.needs_selector() or selector is not None - files = _copy_workdir_source_files(bundle, common_dir) + files - - # Compile all code in one go. - _logger.info("Running precompilation step...") - remaining = max_time - (time.perf_counter() - start) - result, compilation_files = run_compilation( - bundle, common_dir, files, remaining - ) + _logger.debug("Planning execution") + planned_units = plan_test_suite(bundle) + + _logger.debug("Generating files") + common_dir, dependencies, selector = _generate_files(bundle, planned_units) + + # Create an execution plan. + plan = ExecutionPlan( + units=planned_units, + common_directory=common_dir, + files=dependencies, + selector=selector, + max_time=max_time, + start_time=start, + ) + + messages, status, annotations = precompile(bundle, plan) + + # If something went horribly wrong, and the compilation itself caused a timeout or memory issue, bail now. + if status in (Status.TIME_LIMIT_EXCEEDED, Status.MEMORY_LIMIT_EXCEEDED): + _logger.info(f"Compilation resulted in {status}. 
Bailing now.") + collector.add_messages(messages) + collector.add_all(annotations) + terminate(bundle, collector, status) + return - messages, status, annotations = process_compile_results( - bundle.lang_config, result - ) + # If an individual execution unit should be compiled or not. + should_unit_compile = False - # If there is no result, there was no compilation. - if not result: - precompilation_result = None - else: - # Handle timout if necessary. - if result.timeout or result.memory: - collector.add_messages(messages) - collector.add_all(annotations) - status = ( - Status.TIME_LIMIT_EXCEEDED - if result.timeout - else Status.MEMORY_LIMIT_EXCEEDED - ) - terminate(bundle, collector, status) - return + # If the compilation failed, but we are allowed to use a fallback, do that. + if status != Status.CORRECT and bundle.config.options.allow_fallback: + _logger.info( + "Compilation error, falling back to compiling each unit individually." + ) + should_unit_compile = True + # Remove the selector file from the dependencies. + # Otherwise, it will keep being compiled, which we want to avoid. + if bundle.language.needs_selector(): + # The last element in the list is the "selector". + plan.files.pop() + # When compilation succeeded, only add annotations + elif status == Status.CORRECT: + collector.add_messages(messages) + collector.add_all(annotations) + else: + collector.add_messages(messages) + collector.add_all(annotations) + terminate( + bundle, + collector, + StatusMessage( + enum=status, + human=get_i18n_string("judge.core.invalid.source-code"), + ), + ) + _logger.info("Compilation error without fallback") + return # Compilation error occurred, useless to continue. - assert not result.timeout - assert not result.memory - - precompilation_result = (messages, status) - - # If we have fallback, discard all results. - if status != Status.CORRECT and bundle.config.options.allow_fallback: - mode = ExecutionMode.INDIVIDUAL - _logger.info("Compilation error, falling back to individual mode") - # Remove the selector file from the dependencies. - # Otherwise, it will keep being compiled, which we want to avoid. - if selector and bundle.lang_config.needs_selector(): - files.remove(selector) - # When compilation succeeded, only add annotations - elif status == Status.CORRECT: - files = compilation_files - collector.add_all(annotations) - else: + _logger.info("Starting execution.") + # Create a list of runs we want to execute. + for i, planned_unit in enumerate(plan.units): + # Prepare the unit. + execution_dir, dependencies = set_up_unit(bundle, plan, i) + + should_attempt_execution = True + # If compilation is necessary, do it. + if should_unit_compile: + (messages, status, annotations), dependencies = compile_unit( + bundle, plan, i, execution_dir, dependencies + ) + if status == Status.TIME_LIMIT_EXCEEDED: + # There is no more, so stop now. collector.add_messages(messages) collector.add_all(annotations) - terminate( bundle, collector, @@ -149,128 +167,123 @@ def judge(bundle: Bundle): human=get_i18n_string("judge.core.invalid.source-code"), ), ) - _logger.info("Compilation error without fallback") - return # Compilation error occurred, useless to continue. - else: - precompilation_result = None + return + elif status != Status.CORRECT: + # TODO: go back and start again with tabs? + should_attempt_execution = False + else: + collector.add_messages(messages) + collector.add_all(annotations) - _logger.info("Starting judgement...") - # Create a list of runs we want to execute. 
- for tab_index, tab in enumerate(bundle.suite.tabs): - collector.add(StartTab(title=tab.name, hidden=tab.hidden)) - assert tab.contexts - execution_units = merge_contexts_into_units(tab.contexts) - executions = [] - offset = 0 - for execution_index, unit in enumerate(execution_units): - executions.append( - Execution( - unit=unit, - context_offset=offset, - execution_name=execution_name( - bundle.lang_config, tab_index, execution_index - ), - execution_index=execution_index, - mode=mode, - common_directory=common_dir, - files=files, - precompilation_result=precompilation_result, - collector=collector, - ) + # Execute the unit. + if should_attempt_execution: + remaining_time = plan.remaining_time() + execution_result, status = execute_unit( + bundle, planned_unit, execution_dir, dependencies, remaining_time ) - offset += len(unit.contexts) - - remaining = max_time - (time.perf_counter() - start) - result = _single_execution(bundle, executions, remaining) + else: + execution_result = None + + result_status = _process_results( + bundle=bundle, + unit=planned_unit, + execution_result=execution_result, + compiler_messages=messages, + status=status, + execution_dir=execution_dir, + collector=collector, + ) - if result in ( + if result_status in ( Status.TIME_LIMIT_EXCEEDED, Status.MEMORY_LIMIT_EXCEEDED, Status.OUTPUT_LIMIT_EXCEEDED, ): - terminate(bundle, collector, result) + terminate(bundle, collector, result_status) return - collector.add(CloseTab(), tab_index) collector.add(CloseJudgement()) -def _single_execution( - bundle: Bundle, items: List[Execution], max_time: float -) -> Optional[Status]: +def precompile(bundle: Bundle, plan: ExecutionPlan) -> CompilationResult: """ - Process items in a non-threaded way. + Attempt to precompile the execution plan. - :param bundle: The configuration bundle. - :param items: The contexts to execute. - :param max_time: The max amount of time. + :param bundle: The options. + :param plan: The execution plan. + :return: The results of the precompilation step. """ - start = time.perf_counter() - for execution in items: - remaining = max_time - (time.perf_counter() - start) - execution_result, m, s, p = execute_execution(bundle, execution, remaining) + _logger.info("Starting precompilation phase") + assert not bundle.language.needs_selector() or plan.selector is not None + plan_files = filter_files(plan.files, plan.common_directory) + files = _copy_workdir_source_files(bundle, plan.common_directory) + [ + str(x) for x in plan_files + ] + remaining_time = plan.remaining_time() - status = _process_results(bundle, execution, execution_result, m, s, p) + # Do the actual compiling. + result, compilation_files = run_compilation( + bundle, plan.common_directory, files, remaining_time + ) - if status: - return status - return None + # Update the files if the compilation succeeded. + processed_results = process_compile_results(bundle.language, result) + if processed_results[1] == Status.CORRECT: + plan.files = compilation_files + + return processed_results def _generate_files( - bundle: Bundle, mode: ExecutionMode -) -> Tuple[Path, List[str], Optional[str]]: + bundle: Bundle, execution_plan: list[PlannedExecutionUnit] +) -> tuple[Path, list[str], str | None]: """ Generate all necessary files, using the templates. This creates a common directory, copies all dependencies to that folder and runs the generation. 
""" - dependencies = bundle.lang_config.initial_dependencies() + dependencies = bundle.language.initial_dependencies() common_dir = Path(bundle.config.workdir, f"common") common_dir.mkdir() - _logger.debug(f"Generating files in common directory %s", common_dir) + _logger.debug(f"Generating files in common directory {common_dir}") # Copy dependencies - dependency_paths = bundle.lang_config.path_to_dependencies() + dependency_paths = bundle.language.path_to_dependencies() copy_from_paths_to_path(dependency_paths, dependencies, common_dir) # Copy the submission file. - submission = submission_file(bundle.lang_config) + submission = submission_file(bundle.language) solution_path = common_dir / submission # noinspection PyTypeChecker shutil.copy2(bundle.config.source, solution_path) dependencies.append(submission) # Allow modifications of the submission file. - bundle.lang_config.modify_solution(solution_path) + bundle.language.modify_solution(solution_path) # The names of the executions for the test suite. execution_names = [] # Generate the files for each execution. - for tab_i, tab in enumerate(bundle.suite.tabs): - assert tab.contexts - execution_units = merge_contexts_into_units(tab.contexts) - for unit_i, unit in enumerate(execution_units): - exec_name = execution_name(bundle.lang_config, tab_i, unit_i) - _logger.debug(f"Generating file for execution {exec_name}") - generated, evaluators = generate_execution( - bundle=bundle, - destination=common_dir, - execution_unit=unit, - execution_name=exec_name, - ) - # Copy functions to the directory. - for evaluator in evaluators: - source = Path(bundle.config.resources) / evaluator - _logger.debug("Copying oracle from %s to %s", source, common_dir) - shutil.copy2(source, common_dir) - dependencies.extend(evaluators) - dependencies.append(generated) - execution_names.append(exec_name) - - if mode == ExecutionMode.PRECOMPILATION and bundle.lang_config.needs_selector(): - _logger.debug("Generating selector for PRECOMPILATION mode.") + for execution_unit in execution_plan: + _logger.debug(f"Generating file for execution {execution_unit.name}") + generated, evaluators = generate_execution( + bundle=bundle, destination=common_dir, execution_unit=execution_unit + ) + + # Copy functions to the directory. 
+ for evaluator in evaluators: + source = Path(bundle.config.resources) / evaluator + _logger.debug(f"Copying oracle from {source} to {common_dir}") + shutil.copy2(source, common_dir) + + dependencies.extend(evaluators) + dependencies.append(generated) + execution_names.append(execution_unit.name) + + if bundle.language.needs_selector(): + _logger.debug("Generating selector.") generated = generate_selector(bundle, common_dir, execution_names) + dependencies.append(generated) else: generated = None return common_dir, dependencies, generated @@ -278,41 +291,55 @@ def _generate_files( def _process_results( bundle: Bundle, - execution: Execution, - execution_result: Optional[ExecutionResult], - compiler_messages: List[Message], - s: Status, - p: Path, -) -> Optional[Status]: + collector: OutputManager, + unit: PlannedExecutionUnit, + execution_result: ExecutionResult | None, + compiler_messages: list[Message], + status: Status, + execution_dir: Path, +) -> Status | None: if execution_result: context_results = execution_result.to_context_results() else: - context_results = [None] * len(execution.unit.contexts) + context_results = [None] * len(unit.contexts) + + current_tab_index = -1 - for index, (context, context_result) in enumerate( - zip(execution.unit.contexts, context_results), execution.context_offset - ): - execution.collector.add(StartContext(description=context.description)) + for planned, context_result in zip(unit.contexts, context_results): + planned: PlannedContext + if current_tab_index < planned.tab_index: + # Close the previous tab if necessary. + if current_tab_index >= 0: + collector.add(CloseTab(), current_tab_index) + current_tab_index = current_tab_index + 1 + tab = bundle.suite.tabs[current_tab_index] + collector.add(StartTab(title=tab.name, hidden=tab.hidden)) + + # Handle the contexts. + collector.add(StartContext(description=planned.context.description)) continue_ = evaluate_context_results( bundle, - context=context, + context=planned.context, exec_results=context_result, - compiler_results=(compiler_messages, s), - context_dir=p, - collector=execution.collector, + compiler_results=(compiler_messages, status), + context_dir=execution_dir, + collector=collector, ) # We handled the compiler messages above, so remove them. compiler_messages = [] - execution.collector.add(CloseContext(), index) + collector.add(CloseContext(), planned.context_index) if continue_ in (Status.TIME_LIMIT_EXCEEDED, Status.MEMORY_LIMIT_EXCEEDED): return continue_ + + # Finish the final tab. 
+ collector.add(CloseTab(), current_tab_index) return None -def _copy_workdir_source_files(bundle: Bundle, common_dir: Path) -> List[str]: +def _copy_workdir_source_files(bundle: Bundle, common_dir: Path) -> list[str]: """ Copy additional source files from the workdir to the common dir @@ -324,16 +351,16 @@ def _copy_workdir_source_files(bundle: Bundle, common_dir: Path) -> List[str]: def recursive_copy(src: Path, dst: Path): for origin in src.iterdir(): file = origin.name.lower() - if origin.is_file() and bundle.lang_config.is_source_file(origin): + if origin.is_file() and bundle.language.is_source_file(origin): source_files.append(str(dst / origin.name)) - _logger.debug("Copying %s to %s", origin, dst) + _logger.debug(f"Copying {origin} to {dst}") shutil.copy2(origin, dst) elif ( origin.is_dir() and not file.startswith(EXECUTION_PREFIX) and file != "common" ): - _logger.debug("Iterate subdir %s", dst / file) + _logger.debug(f"Iterate subdir {dst / file}") shutil.copytree(origin, dst / file) recursive_copy(bundle.config.workdir, common_dir) diff --git a/tested/judge/execution.py b/tested/judge/execution.py index 5ee51cf9..0844132c 100644 --- a/tested/judge/execution.py +++ b/tested/judge/execution.py @@ -1,7 +1,6 @@ import itertools import logging import shutil -import time from pathlib import Path from typing import List, Optional, Tuple, Union, cast @@ -9,13 +8,22 @@ from tested.configs import Bundle from tested.dodona import Message, Status -from tested.judge.collector import OutputManager from tested.judge.compilation import process_compile_results, run_compilation +from tested.judge.planning import ( + CompilationResult, + ExecutionPlan, + PlannedContext, + PlannedExecutionUnit, +) from tested.judge.utils import BaseExecutionResult, run_command from tested.languages.config import FileFilter -from tested.languages.conventionalize import EXECUTION_PREFIX, selector_name +from tested.languages.conventionalize import ( + EXECUTION_PREFIX, + execution_name, + selector_name, +) from tested.languages.preparation import exception_file, value_file -from tested.testsuite import Context, EmptyChannel, ExecutionMode, MainInput +from tested.testsuite import EmptyChannel, MainInput from tested.utils import safe_del _logger = logging.getLogger(__name__) @@ -104,37 +112,13 @@ def to_context_results( @define class ExecutionUnit: - """ - Combines a set of contexts that will be executed together. - """ - - contexts: List[Context] - - def get_stdin(self, resources: Path) -> str: - return "\n".join(c.get_stdin(resources) or "" for c in self.contexts) - - def has_main_testcase(self) -> bool: - return self.contexts[0].has_main_testcase() - - def has_exit_testcase(self) -> bool: - return self.contexts[-1].has_exit_testcase() - - -@define -class Execution: """ Contains an execution unit and various metadata. 
""" - unit: ExecutionUnit - context_offset: int - execution_name: str - execution_index: int - mode: ExecutionMode - common_directory: Path + planned: PlannedExecutionUnit files: Union[List[str], FileFilter] precompilation_result: Optional[Tuple[List[Message], Status]] - collector: OutputManager def filter_files(files: Union[List[str], FileFilter], directory: Path) -> List[Path]: @@ -172,7 +156,7 @@ def execute_file( """ _logger.info("Starting execution on file %s", executable_name) - command = bundle.lang_config.execution( + command = bundle.language.execution( cwd=working_directory, file=executable_name, arguments=[argument] if argument else [], @@ -200,127 +184,104 @@ def copy_workdir_files(bundle: Bundle, context_dir: Path): shutil.copytree(origin, context_dir / file) -def execute_execution( - bundle: Bundle, args: Execution, max_time: float -) -> Tuple[Optional[ExecutionResult], List[Message], Status, Path]: - """ - Execute an execution. - """ - lang_config = bundle.lang_config - start = time.perf_counter() +def _get_contents_or_empty(file_path: Path) -> str: + try: + with open(file_path, "r") as f: + return f.read() + except FileNotFoundError: + _logger.warning(f"File not found, looked in {file_path}") + return "" + +def set_up_unit( + bundle: Bundle, plan: ExecutionPlan, which_unit: int +) -> tuple[Path, list[Path]]: + unit = plan.units[which_unit] # Create a working directory for the execution. - execution_dir = Path(bundle.config.workdir, args.execution_name) + execution_dir = Path(bundle.config.workdir, unit.name) execution_dir.mkdir() - _logger.info("Executing %s in path %s", args.execution_name, execution_dir) + _logger.info(f"Preparing {unit.name} in {execution_dir}") # Filter dependencies of the global compilation results. - dependencies = filter_files(args.files, args.common_directory) - dependencies = bundle.lang_config.filter_dependencies( - dependencies, args.execution_name - ) - _logger.debug("Dependencies are %s", dependencies) + dependencies = filter_files(plan.files, plan.common_directory) + dependencies = bundle.language.filter_dependencies(dependencies, unit.name) + _logger.debug(f"Dependencies are {dependencies}") copy_workdir_files(bundle, execution_dir) # Copy files from the common directory to the context directory. for file in dependencies: - origin = args.common_directory / file + origin = plan.common_directory / file destination = execution_dir / file # Ensure we preserve subdirectories. destination.parent.mkdir(parents=True, exist_ok=True) - _logger.debug("Copying %s to %s", origin, destination) + _logger.debug(f"Copying {origin} to {destination}") if origin == destination: continue # Don't copy the file to itself shutil.copy2(origin, destination) - # If needed, do a compilation. - if args.mode == ExecutionMode.INDIVIDUAL: - _logger.info("Compiling context %s in INDIVIDUAL mode...", args.execution_name) - remaining = max_time - (time.perf_counter() - start) - deps = [str(x) for x in dependencies] - result, files = run_compilation(bundle, execution_dir, deps, remaining) - - # A new compilation means a new file filtering - files = filter_files(files, execution_dir) + return execution_dir, dependencies - # Process compilation results. 
- messages, status, annotations = process_compile_results(lang_config, result) - for annotation in annotations: - args.collector.add(annotation) - - if status != Status.CORRECT: - _logger.debug("Compilation of individual context failed.") - _logger.debug("Aborting executing of this context.") - return None, messages, status, execution_dir - - _logger.debug("Executing context %s in INDIVIDUAL mode...", args.execution_name) +def compile_unit( + bundle: Bundle, + plan: ExecutionPlan, + which_unit: int, + execution_dir: Path, + dependencies: list[Path], +) -> tuple[CompilationResult, list[Path]]: + unit = plan.units[which_unit] + _logger.info(f"Compiling unit {unit.name}") + remaining = plan.remaining_time() + deps = [str(x) for x in dependencies] + result, files = run_compilation(bundle, execution_dir, deps, remaining) + + # A new compilation means a new file filtering + files = filter_files(files, execution_dir) + + # Process compilation results. + processed_results = process_compile_results(bundle.language, result) + return processed_results, files + + +def execute_unit( + bundle: Bundle, + unit: PlannedExecutionUnit, + execution_dir: Path, + dependencies: list[Path], + remaining_time: float, +) -> tuple[ExecutionResult | None, Status]: + """ + Execute a unit. - executable, messages, status, annotations = lang_config.find_main_file( - files, args.execution_name, messages - ) + This function assumes the files have been prepared (set_up_unit) and + compilation has happened if needed. - for annotation in annotations: - args.collector.add(annotation) + :param bundle: The bundle. + :param unit: The unit to execute. + :param execution_dir: The directory in which we execute. + :param dependencies: The dependencies. + :param remaining_time: The remaining time for this execution. + """ + _logger.info(f"Executing unit {unit.name}") - if status != Status.CORRECT: - return None, messages, status, execution_dir + files = list(dependencies) # A copy of the files. 
- files.remove(executable) - stdin = args.unit.get_stdin(bundle.config.resources) - argument = None + if bundle.language.needs_selector(): + main_file_name = selector_name(bundle.language) + argument = unit.name else: - result, files = None, list(dependencies) - if args.precompilation_result: - _logger.debug("Substituting precompilation results.") - messages, _ = args.precompilation_result - else: - _logger.debug("No precompilation results found, using default.") - messages, _ = [], Status.CORRECT - - _logger.info( - "Executing context %s in PRECOMPILATION mode...", args.execution_name - ) - - if lang_config.needs_selector(): - _logger.debug("Selector is needed, using it.") - - selector = selector_name(lang_config) - - executable, messages, status, annotations = lang_config.find_main_file( - files, selector, messages - ) - - _logger.debug(f"Found main file: {executable}") - - for annotation in annotations: - args.collector.add(annotation) - - if status != Status.CORRECT: - return None, messages, status, execution_dir - - files.remove(executable) - stdin = args.unit.get_stdin(bundle.config.resources) - argument = args.execution_name - else: - _logger.debug("Selector is not needed, using individual execution.") - - executable, messages, status, annotations = lang_config.find_main_file( - files, args.execution_name, messages - ) - - for annotation in annotations: - args.collector.add(annotation) + main_file_name = unit.name + argument = None - if status != Status.CORRECT: - return None, messages, status, execution_dir + executable, status = bundle.language.find_main_file(files, main_file_name) + _logger.debug(f"Found main file: {executable}") - files.remove(executable) - stdin = args.unit.get_stdin(bundle.config.resources) - argument = None + if status != Status.CORRECT: + return None, status - remaining = max_time - (time.perf_counter() - start) + files.remove(executable) + stdin = unit.get_stdin(bundle.config.resources) # Do the execution. base_result = execute_file( @@ -329,27 +290,14 @@ def execute_execution( working_directory=execution_dir, stdin=stdin, argument=argument, - remaining=remaining, + remaining=remaining_time, ) testcase_identifier = f"--{bundle.testcase_separator_secret}-- SEP" context_identifier = f"--{bundle.context_separator_secret}-- SEP" - value_file_path = value_file(bundle, execution_dir) - try: - with open(value_file_path, "r") as f: - values = f.read() - except FileNotFoundError: - _logger.warning("Value file not found, looked in %s", value_file_path) - values = "" - - exception_file_path = exception_file(bundle, execution_dir) - try: - with open(exception_file_path, "r") as f: - exceptions = f.read() - except FileNotFoundError: - _logger.warning("Exception file not found, looked in %s", exception_file_path) - exceptions = "" + values = _get_contents_or_empty(value_file(bundle, execution_dir)) + exceptions = _get_contents_or_empty(exception_file(bundle, execution_dir)) result = ExecutionResult( stdout=base_result.stdout, @@ -363,38 +311,64 @@ def execute_execution( memory=base_result.memory, ) - return result, messages, status, execution_dir + return result, status -def merge_contexts_into_units(contexts: List[Context]) -> List[ExecutionUnit]: +def plan_test_suite(bundle: Bundle) -> list[PlannedExecutionUnit]: """ - Merge contexts into as little execution units as possible. + Transform a test suite into a list of execution units. - :param contexts: - :return: + :param bundle: The configuration + :return: A list of planned execution units. 
""" - # return [ExecutionUnit(contexts=[c]) for c in contexts] + + # First, flatten all contexts into a single list. + flattened_contexts = [] + for t, tab in enumerate(bundle.suite.tabs): + for c, context in enumerate(tab.contexts): + flattened_contexts.append( + PlannedContext(context=context, tab_index=t, context_index=c) + ) units = [] current_unit = [] - for context in contexts: + for planned in flattened_contexts: # If we get stdin, start a new execution unit. if ( - context.has_main_testcase() - and cast(MainInput, context.testcases[0].input).stdin != EmptyChannel.NONE + planned.context.has_main_testcase() + and cast(MainInput, planned.context.testcases[0].input).stdin + != EmptyChannel.NONE ): if current_unit: - units.append(ExecutionUnit(contexts=current_unit)) + units.append( + PlannedExecutionUnit( + contexts=current_unit, + name=execution_name(bundle.language, len(units)), + index=len(units), + ) + ) current_unit = [] - current_unit.append(context) + current_unit.append(planned) - if context.has_exit_testcase(): - units.append(ExecutionUnit(contexts=current_unit)) + if planned.context.has_exit_testcase(): + units.append( + PlannedExecutionUnit( + contexts=current_unit, + name=execution_name(bundle.language, len(units)), + index=len(units), + ) + ) current_unit = [] if current_unit: - units.append(ExecutionUnit(contexts=current_unit)) + units.append( + PlannedExecutionUnit( + contexts=current_unit, + name=execution_name(bundle.language, len(units)), + index=len(units), + ) + ) return units diff --git a/tested/judge/linter.py b/tested/judge/linter.py index 070fd42a..3da0fdf1 100644 --- a/tested/judge/linter.py +++ b/tested/judge/linter.py @@ -26,7 +26,7 @@ def run_linter(bundle: Bundle, collector: OutputManager, remaining: float): _logger.debug("Running linter...") - messages, annotations = bundle.lang_config.linter(remaining) + messages, annotations = bundle.language.linter(remaining) for message in messages: collector.add(AppendMessage(message=message)) diff --git a/tested/judge/planning.py b/tested/judge/planning.py new file mode 100644 index 00000000..e89071c8 --- /dev/null +++ b/tested/judge/planning.py @@ -0,0 +1,63 @@ +""" +This module decides what and when things are executed. +""" +import time +from pathlib import Path +from typing import Optional + +from attrs import define + +from tested.dodona import AnnotateCode, Message, Status +from tested.languages.config import FileFilter +from tested.testsuite import Context + +CompilationResult = tuple[list[Message], Status, list[AnnotateCode]] + + +@define +class PlannedContext: + """Identifies a context by its position in the test suite.""" + + context: Context + tab_index: int + context_index: int + + +@define +class PlannedExecutionUnit: + """ + Planned contexts are grouped together in series of executions. Each execution + consists of the contexts that can be executed together in one go. + """ + + contexts: list[PlannedContext] + # The name of this execution, conventionalized for the language. + name: str + # Which position in the execution plan this execution has. 
+ index: int + + def get_stdin(self, resources: Path) -> str: + return "\n".join(c.context.get_stdin(resources) or "" for c in self.contexts) + + def has_main_testcase(self) -> bool: + return self.contexts[0].context.has_main_testcase() + + def has_exit_testcase(self) -> bool: + return self.contexts[-1].context.has_exit_testcase() + + +@define +class ExecutionPlan: + units: list[PlannedExecutionUnit] + common_directory: Path # The folder in which we will execute. + selector: Optional[str] + + # When the execution has started + max_time: float + start_time: float + + # Stuff that is set after the plan has been made. + files: list[str] | FileFilter # The files we need for execution. + + def remaining_time(self) -> float: + return self.max_time - (time.perf_counter() - self.start_time) diff --git a/tested/judge/programmed.py b/tested/judge/programmed.py index c742174e..50fbf7e2 100644 --- a/tested/judge/programmed.py +++ b/tested/judge/programmed.py @@ -79,7 +79,7 @@ def _evaluate_others( ) # Check if the language supports this. - if Construct.EVALUATION not in eval_bundle.lang_config.supported_constructs(): + if Construct.EVALUATION not in eval_bundle.language.supported_constructs(): _logger.error( f"{eval_bundle.config.programming_language} does not support" f" evaluations." @@ -101,8 +101,8 @@ def _evaluate_others( shutil.copy2(origin_path, custom_path) # Copy the dependencies to the folder. - dependencies = eval_bundle.lang_config.initial_dependencies() - origin = eval_bundle.lang_config.path_to_dependencies() + dependencies = eval_bundle.language.initial_dependencies() + origin = eval_bundle.language.path_to_dependencies() copy_from_paths_to_path(origin, dependencies, custom_path) # Include the actual oracle in the dependencies. dependencies.append(evaluator.function.file.name) @@ -120,7 +120,7 @@ def _evaluate_others( _logger.debug("Generated oracle executor %s", evaluator_name) # Do compilation for those configs that require it. - command, files = eval_bundle.lang_config.compilation(dependencies) + command, files = eval_bundle.language.compilation(dependencies) _logger.debug("Compiling custom oracle with command %s", command) result = run_command(custom_path, None, command) if result and result.stderr: @@ -130,7 +130,7 @@ def _evaluate_others( # Execute the custom oracle. evaluator_name = Path(evaluator_name).stem - files = eval_bundle.lang_config.filter_dependencies(files, evaluator_name) + files = eval_bundle.language.filter_dependencies(files, evaluator_name) for file in files: origin = custom_path / file try: @@ -139,9 +139,7 @@ def _evaluate_others( # If the file already exists, skip it. 
pass - executable, _, status, _ = eval_bundle.lang_config.find_main_file( - files, evaluator_name, [] - ) + executable, status = eval_bundle.language.find_main_file(files, evaluator_name) if status != Status.CORRECT: return BaseExecutionResult( diff --git a/tested/languages/c/generators.py b/tested/languages/c/generators.py index 6159b421..966a3a4f 100644 --- a/tested/languages/c/generators.py +++ b/tested/languages/c/generators.py @@ -171,7 +171,7 @@ def _generate_internal_context(ctx: PreparedContext, pu: PreparedExecutionUnit) # Generate code for each testcase tc: PreparedTestcase for tc in ctx.testcases: - result += f"{pu.execution_name}_write_separator();\n" + result += f"{pu.unit.name}_write_separator();\n" if tc.testcase.is_main_testcase(): assert isinstance(tc.input, MainInput) @@ -215,59 +215,59 @@ def convert_execution_unit(pu: PreparedExecutionUnit) -> str: result += f'#include "{name}.c"\n' result += f""" - static FILE* {pu.execution_name}_value_file = NULL; - static FILE* {pu.execution_name}_exception_file = NULL; + static FILE* {pu.unit.name}_value_file = NULL; + static FILE* {pu.unit.name}_exception_file = NULL; - static void {pu.execution_name}_write_separator() {{ - fprintf({pu.execution_name}_value_file, "--{pu.testcase_separator_secret}-- SEP"); - fprintf({pu.execution_name}_exception_file, "--{pu.testcase_separator_secret}-- SEP"); + static void {pu.unit.name}_write_separator() {{ + fprintf({pu.unit.name}_value_file, "--{pu.testcase_separator_secret}-- SEP"); + fprintf({pu.unit.name}_exception_file, "--{pu.testcase_separator_secret}-- SEP"); fprintf(stdout, "--{pu.testcase_separator_secret}-- SEP"); fprintf(stderr, "--{pu.testcase_separator_secret}-- SEP"); }} - static void {pu.execution_name}_write_context_separator() {{ - fprintf({pu.execution_name}_value_file, "--{pu.context_separator_secret}-- SEP"); - fprintf({pu.execution_name}_exception_file, "--{pu.context_separator_secret}-- SEP"); + static void {pu.unit.name}_write_context_separator() {{ + fprintf({pu.unit.name}_value_file, "--{pu.context_separator_secret}-- SEP"); + fprintf({pu.unit.name}_exception_file, "--{pu.context_separator_secret}-- SEP"); fprintf(stdout, "--{pu.context_separator_secret}-- SEP"); fprintf(stderr, "--{pu.context_separator_secret}-- SEP"); }} #undef send_value - #define send_value(value) write_value({pu.execution_name}_value_file, value) + #define send_value(value) write_value({pu.unit.name}_value_file, value) #undef send_specific_value - #define send_specific_value(value) write_evaluated({pu.execution_name}_value_file, value) + #define send_specific_value(value) write_evaluated({pu.unit.name}_value_file, value) """ # Generate code for each context. 
ctx: PreparedContext for i, ctx in enumerate(pu.contexts): result += f""" - int {pu.execution_name}_context_{i}(void) {{ + int {pu.unit.name}_context_{i}(void) {{ {_generate_internal_context(ctx, pu)} }} """ result += f""" - int {pu.execution_name}() {{ - {pu.execution_name}_value_file = fopen("{pu.value_file}", "w"); - {pu.execution_name}_exception_file = fopen("{pu.exception_file}", "w"); + int {pu.unit.name}() {{ + {pu.unit.name}_value_file = fopen("{pu.value_file}", "w"); + {pu.unit.name}_exception_file = fopen("{pu.exception_file}", "w"); int exit_code; """ for i, ctx in enumerate(pu.contexts): - result += " " * 4 + f"{pu.execution_name}_write_context_separator();\n" - result += " " * 4 + f"exit_code = {pu.execution_name}_context_{i}();\n" + result += " " * 4 + f"{pu.unit.name}_write_context_separator();\n" + result += " " * 4 + f"exit_code = {pu.unit.name}_context_{i}();\n" result += f""" - fclose({pu.execution_name}_value_file); - fclose({pu.execution_name}_exception_file); + fclose({pu.unit.name}_value_file); + fclose({pu.unit.name}_exception_file); return exit_code; }} #ifndef INCLUDED int main() {{ - return {pu.execution_name}(); + return {pu.unit.name}(); }} #endif """ diff --git a/tested/languages/config.py b/tested/languages/config.py index 46849185..70028dfe 100644 --- a/tested/languages/config.py +++ b/tested/languages/config.py @@ -24,7 +24,6 @@ from tested.datatypes import AllTypes, ExpressionTypes from tested.dodona import AnnotateCode, Message, Status from tested.features import Construct, TypeSupport -from tested.internationalization import get_i18n_string from tested.languages.conventionalize import ( EXECUTION_PREFIX, Conventionable, @@ -358,19 +357,21 @@ def filter_function(file: Path) -> bool: @typing.overload def find_main_file( - self, files: List[Path], name: str, precompilation_messages: List[Message] - ) -> Tuple[Path, List[Message], typing.Literal[Status.CORRECT], List[AnnotateCode]]: + self, + files: List[Path], + name: str, + ) -> tuple[Path, typing.Literal[Status.CORRECT]]: ... @typing.overload - def find_main_file( - self, files: List[Path], name: str, precompilation_messages: List[Message] - ) -> Tuple[None, List[Message], Status, List[AnnotateCode]]: + def find_main_file(self, files: List[Path], name: str) -> tuple[None, Status]: ... def find_main_file( - self, files: List[Path], name: str, precompilation_messages: List[Message] - ) -> Tuple[Optional[Path], List[Message], Status, List[AnnotateCode]]: + self, + files: List[Path], + name: str, + ) -> Tuple[Optional[Path], Status]: """ Find the "main" file in a list of files. @@ -379,19 +380,14 @@ def find_main_file( :param files: A list of files. :param name: The name of the main file. - :param precompilation_messages: A list of precompilation messages. :return: The main file or a list of messages. """ - # TODO: check why the messages are needed here... 
_logger.debug("Finding %s in %s", name, files) - messages = [] possible_main_files = [x for x in files if x.name.startswith(name)] if possible_main_files: - return possible_main_files[0], messages, Status.CORRECT, [] + return possible_main_files[0], Status.CORRECT else: - messages.extend(precompilation_messages) - messages.append(get_i18n_string("languages.config.unknown.compilation")) - return None, messages, Status.COMPILATION_ERROR, [] + return None, Status.COMPILATION_ERROR def cleanup_stacktrace(self, stacktrace: str) -> str: """ diff --git a/tested/languages/conventionalize.py b/tested/languages/conventionalize.py index ffc7d960..039815b9 100644 --- a/tested/languages/conventionalize.py +++ b/tested/languages/conventionalize.py @@ -441,15 +441,14 @@ def selector_file(language: "Language") -> str: return language.with_extension(selector_name(language)) -def execution_name(language: "Language", tab_number: int, execution_number: int) -> str: +def execution_name(language: "Language", execution_number: int) -> str: """ Get the name of an execution. The name should be unique for the tab and execution number combination. :param language: The language module. - :param tab_number: The number of the tab. :param execution_number: The number of the execution. :return: The name of the execution. """ - name = f"{EXECUTION_PREFIX}_{tab_number}_{execution_number}" + name = f"{EXECUTION_PREFIX}_{execution_number}" return conventionalize_namespace(language, name) diff --git a/tested/languages/csharp/config.py b/tested/languages/csharp/config.py index ff10846d..0667f326 100644 --- a/tested/languages/csharp/config.py +++ b/tested/languages/csharp/config.py @@ -6,7 +6,6 @@ from tested.datatypes import AllTypes from tested.dodona import AnnotateCode, Message, Status from tested.features import Construct, TypeSupport -from tested.internationalization import get_i18n_string from tested.languages.config import ( CallbackResult, Command, @@ -123,24 +122,20 @@ def execution(self, cwd: Path, file: str, arguments: List[str]) -> Command: return ["dotnet", file, *arguments] def find_main_file( - self, files: List[Path], name: str, precompilation_messages: List[str] - ) -> Tuple[Optional[Path], List[Message], Status, List[AnnotateCode]]: + self, files: List[Path], name: str + ) -> Tuple[Optional[Path], Status]: # TODO: specify the extension (if any) of the output files, so we don't need to # override this. 
logger.debug("Finding %s in %s", name, files) - messages = [] possible_main_files = [ x for x in files if x.name.startswith(name) and x.suffix == ".dll" ] if possible_main_files: - return possible_main_files[0], messages, Status.CORRECT, [] + return possible_main_files[0], Status.CORRECT else: - messages.extend(precompilation_messages) - messages.append(get_i18n_string("languages.config.unknown.compilation")) - return None, messages, Status.COMPILATION_ERROR, [] + return None, Status.COMPILATION_ERROR def modify_solution(self, solution: Path): - # noinspection PyTypeChecker with open(solution, "r") as file: contents = file.read() diff --git a/tested/languages/csharp/generators.py b/tested/languages/csharp/generators.py index d7610e04..0e06945e 100644 --- a/tested/languages/csharp/generators.py +++ b/tested/languages/csharp/generators.py @@ -326,14 +326,14 @@ def convert_execution_unit(pu: PreparedExecutionUnit) -> str: namespace Tested {{ -class {pu.execution_name} +class {pu.unit.name} {{ private readonly StreamWriter valueFile; private readonly StreamWriter exceptionFile; private readonly StreamWriter stdout; private readonly StreamWriter stderr; - public {pu.execution_name}() + public {pu.unit.name}() {{ valueFile = new StreamWriter(File.OpenWrite("{pu.value_file}")); exceptionFile = new StreamWriter(File.OpenWrite("{pu.exception_file}")); @@ -406,7 +406,7 @@ class {pu.execution_name} result += f""" public static void Main(string[] a) {{ - {pu.execution_name} execution = new {pu.execution_name}(); + {pu.unit.name} execution = new {pu.unit.name}(); execution.Execute(); }} }} diff --git a/tested/languages/generation.py b/tested/languages/generation.py index db9cea4e..a5961845 100644 --- a/tested/languages/generation.py +++ b/tested/languages/generation.py @@ -7,8 +7,9 @@ import re import shlex import urllib.parse +from collections.abc import Iterable from pathlib import Path -from typing import TYPE_CHECKING, Iterable, List, Match, Set, Tuple, TypeAlias +from typing import Match, TypeAlias from pygments import highlight from pygments.formatters.html import HtmlFormatter @@ -18,6 +19,7 @@ from tested.datatypes import AllTypes, BasicObjectTypes, BasicSequenceTypes from tested.dodona import ExtendedMessage from tested.internationalization import get_i18n_string +from tested.judge.planning import PlannedExecutionUnit from tested.languages import Language from tested.languages.conventionalize import ( conventionalize_namespace, @@ -52,17 +54,13 @@ TextData, ) -# Prevent cyclic imports for types... 
-if TYPE_CHECKING: - from tested.judge.execution import ExecutionUnit - _logger = logging.getLogger(__name__) _html_formatter = HtmlFormatter(nowrap=True) # Alias for type declarations NestedTypeDeclaration: TypeAlias = ( - AllTypes | Tuple[AllTypes, Tuple["NestedTypeDeclaration", ...]] + AllTypes | tuple[AllTypes, tuple["NestedTypeDeclaration", ...]] ) @@ -86,10 +84,10 @@ def generate_execution_unit( :param bundle: :return: """ - return bundle.lang_config.generate_execution_unit(prepared_execution) + return bundle.language.generate_execution_unit(prepared_execution) -def _handle_link_files(link_files: Iterable[FileUrl], language: str) -> Tuple[str, str]: +def _handle_link_files(link_files: Iterable[FileUrl], language: str) -> tuple[str, str]: dict_links = dict( (link_file.name, get_converter().unstructure(link_file)) for link_file in link_files @@ -114,7 +112,7 @@ def _escape_shell(arg: str) -> str: def get_readable_input( bundle: Bundle, case: Testcase -) -> Tuple[ExtendedMessage, Set[FileUrl]]: +) -> tuple[ExtendedMessage, set[FileUrl]]: """ Get human-readable input for a testcase. This function will use, in order of availability: @@ -133,7 +131,7 @@ def get_readable_input( # See https://rouge-ruby.github.io/docs/Rouge/Lexers/ConsoleLexer.html format_ = "console" arguments = " ".join(_escape_shell(x) for x in case.input.arguments) - submission = submission_name(bundle.lang_config) + submission = submission_name(bundle.language) args = f"$ {submission} {arguments}" if isinstance(case.input.stdin, TextData): stdin = case.input.stdin.get_data_as_string(bundle.config.resources) @@ -149,7 +147,7 @@ def get_readable_input( elif isinstance(case.input, Statement): format_ = bundle.config.programming_language text = generate_statement(bundle, case.input) - text = bundle.lang_config.cleanup_description(text) + text = bundle.language.cleanup_description(text) else: assert isinstance(case.input, LanguageLiterals) text = case.input.get_for(bundle.config.programming_language) @@ -239,38 +237,33 @@ def generate_statement(bundle: Bundle, statement: Statement) -> str: assert isinstance(statement, Assignment) statement = prepare_assignment(bundle, statement) - return bundle.lang_config.generate_statement(statement) + return bundle.language.generate_statement(statement) def generate_execution( bundle: Bundle, destination: Path, - execution_unit: "ExecutionUnit", - execution_name: str, -) -> Tuple[str, List[str]]: + execution_unit: PlannedExecutionUnit, +) -> tuple[str, list[str]]: """ Generate the files related to the execution. :param bundle: The configuration bundle. :param destination: Where the generated files should go. :param execution_unit: The execution for which generation is happening. - :param execution_name: The name of the execution module. :return: The name of the generated file in the given destination and a set of oracle names that will also be needed. 
""" - prepared_execution = prepare_execution_unit( - bundle, destination, execution_name, execution_unit - ) - + prepared_execution = prepare_execution_unit(bundle, destination, execution_unit) execution_code = generate_execution_unit(bundle, prepared_execution) evaluator_files = [ - f"{x}.{bundle.lang_config.file_extension()}" + f"{x}.{bundle.language.file_extension()}" for x in prepared_execution.evaluator_names ] - execution_name = bundle.lang_config.with_extension(execution_name) + execution_name = bundle.language.with_extension(execution_unit.name) execution_destination = destination / execution_name with open(execution_destination, "w") as execution_file: @@ -280,7 +273,7 @@ def generate_execution( def generate_selector( - bundle: Bundle, destination: Path, context_names: List[str] + bundle: Bundle, destination: Path, context_names: list[str] ) -> str: """ Generate the file to execute_module a single context. @@ -291,10 +284,10 @@ def generate_selector( :return: The name of the generated file in the given destination. """ - assert bundle.lang_config.needs_selector() - selector_filename = selector_file(bundle.lang_config) + assert bundle.language.needs_selector() + selector_filename = selector_file(bundle.language) selector_destination = destination / selector_filename - selector_code = bundle.lang_config.generate_selector(context_names) + selector_code = bundle.language.generate_selector(context_names) with open(selector_destination, "w") as execution_file: execution_file.write(selector_code) return selector_filename @@ -325,7 +318,7 @@ def generate_custom_evaluator( :return: The name of the generated file. """ evaluator_name = conventionalize_namespace( - bundle.lang_config, evaluator.function.file.stem + bundle.language, evaluator.function.file.stem ) arguments = custom_oracle_arguments(evaluator) @@ -337,10 +330,10 @@ def generate_custom_evaluator( has_root_namespace=False, ) - code = bundle.lang_config.generate_check_function(evaluator_name, function) + code = bundle.language.generate_check_function(evaluator_name, function) if destination.is_dir(): - destination /= bundle.lang_config.with_extension("EvaluatorExecutor") + destination /= bundle.language.with_extension("EvaluatorExecutor") with open(destination, "w") as check_function_file: check_function_file.write(code) diff --git a/tested/languages/haskell/generators.py b/tested/languages/haskell/generators.py index 8e0cb0fa..f9b3f37f 100644 --- a/tested/languages/haskell/generators.py +++ b/tested/languages/haskell/generators.py @@ -171,7 +171,7 @@ def convert_statement(statement: Statement, lifting=False) -> str: def convert_execution_unit(pu: PreparedExecutionUnit) -> str: result = f"""{{-# LANGUAGE NamedFieldPuns #-}} -module {pu.execution_name} where +module {pu.unit.name} where import System.IO (hPutStr, stderr, stdout, hFlush) import System.Environment @@ -230,8 +230,8 @@ def convert_execution_unit(pu: PreparedExecutionUnit) -> str: # Generate code for each context. 
ctx: PreparedContext for i, ctx in enumerate(pu.contexts): - result += f"{pu.execution_name.lower()}Context{i} :: IO ()\n" - result += f"{pu.execution_name.lower()}Context{i} = do\n" + result += f"{pu.unit.name.lower()}Context{i} :: IO ()\n" + result += f"{pu.unit.name.lower()}Context{i} = do\n" result += indent + ctx.before + "\n" # Generate code for each testcase @@ -296,7 +296,7 @@ def convert_execution_unit(pu: PreparedExecutionUnit) -> str: """ for i, ctx in enumerate(pu.contexts): result += indent + "writeContextSeparator\n" - result += indent + f"{pu.execution_name.lower()}Context{i}\n" + result += indent + f"{pu.unit.name.lower()}Context{i}\n" result += indent + 'putStr ""\n' return result diff --git a/tested/languages/java/generators.py b/tested/languages/java/generators.py index a7b9bab2..2fbdf1ea 100644 --- a/tested/languages/java/generators.py +++ b/tested/languages/java/generators.py @@ -319,12 +319,12 @@ def convert_execution_unit(pu: PreparedExecutionUnit) -> str: import java.math.BigInteger; import java.math.BigDecimal; - public class {pu.execution_name} implements Closeable {{ + public class {pu.unit.name} implements Closeable {{ private final PrintWriter valueWriter; private final PrintWriter exceptionWriter; - public {pu.execution_name}() throws Exception {{ + public {pu.unit.name}() throws Exception {{ this.valueWriter = new PrintWriter("{pu.value_file}"); this.exceptionWriter = new PrintWriter("{pu.exception_file}"); }} @@ -393,7 +393,7 @@ def convert_execution_unit(pu: PreparedExecutionUnit) -> str: }} public static void main(String[] a) throws Exception {{ - try({pu.execution_name} execution = new {pu.execution_name}()) {{ + try({pu.unit.name} execution = new {pu.unit.name}()) {{ execution.execute(); }} }} diff --git a/tested/languages/kotlin/config.py b/tested/languages/kotlin/config.py index fa97497c..328719fe 100644 --- a/tested/languages/kotlin/config.py +++ b/tested/languages/kotlin/config.py @@ -183,16 +183,14 @@ def linter(self, remaining: float) -> Tuple[List[Message], List[AnnotateCode]]: return linter.run_ktlint(self.config.dodona, remaining) def find_main_file( - self, files: List[Path], name: str, precompilation_messages: List[Message] - ) -> Tuple[Optional[Path], List[Message], Status, List[AnnotateCode]]: + self, files: List[Path], name: str + ) -> Tuple[Optional[Path], Status]: logger.debug("Finding %s in %s", name, files) - main, msgs, status, ants = Language.find_main_file( - self, files, name + "Kt", precompilation_messages - ) + main, status = Language.find_main_file(self, files, name + "Kt") if status == Status.CORRECT: - return main, msgs, status, ants + return main, status else: - return Language.find_main_file(self, files, name, precompilation_messages) + return Language.find_main_file(self, files, name) def filter_dependencies(self, files: List[Path], context_name: str) -> List[Path]: def filter_function(file_path: Path) -> bool: diff --git a/tested/languages/kotlin/generators.py b/tested/languages/kotlin/generators.py index d8a4a51d..3db33339 100644 --- a/tested/languages/kotlin/generators.py +++ b/tested/languages/kotlin/generators.py @@ -268,7 +268,7 @@ def convert_execution_unit(pu: PreparedExecutionUnit) -> str: import java.math.BigInteger import java.math.BigDecimal -class {pu.execution_name}: AutoCloseable {{ +class {pu.unit.name}: AutoCloseable {{ private val valueWriter = PrintWriter("{pu.value_file}") private val exceptionWriter = PrintWriter("{pu.exception_file}") @@ -369,7 +369,7 @@ class {pu.execution_name}: AutoCloseable {{ }} 
fun main(args: Array = emptyArray()) {{ - val execution = {pu.execution_name}() + val execution = {pu.unit.name}() execution.use {{ it.execute() }} diff --git a/tested/languages/preparation.py b/tested/languages/preparation.py index 3789b30a..b737a573 100644 --- a/tested/languages/preparation.py +++ b/tested/languages/preparation.py @@ -57,8 +57,8 @@ ) if TYPE_CHECKING: - from tested.judge.execution import ExecutionUnit - from tested.languages import Language + from tested.judge.planning import PlannedExecutionUnit + from tested.languages.config import Language # Names of the predefined functions that must be available. SEND_VALUE = "send_value" @@ -177,24 +177,24 @@ class PreparedExecutionUnit: An execution unit that has been prepared for code generation. """ - execution_unit: "ExecutionUnit" - "The original execution unit." + # The planned, original execution unit. + unit: "PlannedExecutionUnit" + # A list of prepared contexts. contexts: List[PreparedContext] - "The prepared contexts." - execution_name: str - "The name of the execution." + # The name of the file for the return channel. value_file: str - "The name of the file for the return channel." + # The name of the file for the exception channel. exception_file: str - "The name of the file for the exception channel." + # The name of the submission file. submission_name: str - "The name of the submission file." + # The secret context separator. context_separator_secret: str - "Secret for use in the context separator." + # The secret testcase separator. testcase_separator_secret: str - "Secret for use in the testcase separator." + # The names of the language-specific functions we will need. evaluator_names: Set[str] - "The names of the language-specific functions we will need." + # The language module. + # TODO: this should not go here, but it does. 
language: "Language" @@ -210,11 +210,11 @@ def prepare_argument( def prepare_assignment(bundle: Bundle, assignment: Assignment) -> Assignment: if isinstance(assignment.type, VariableType): - class_type = conventionalize_class(bundle.lang_config, assignment.type.data) + class_type = conventionalize_class(bundle.language, assignment.type.data) assignment = assignment.replace_type(VariableType(data=class_type)) assignment = assignment.replace_variable( - conventionalize_identifier(bundle.lang_config, assignment.variable) + conventionalize_identifier(bundle.language, assignment.variable) ) prepared = prepare_expression(bundle, assignment.expression) return assignment.replace_expression(prepared) @@ -228,25 +228,25 @@ def prepare_expression(bundle: Bundle, expression: Expression) -> Expression: if isinstance(expression, Identifier): if not expression.is_raw: expression = Identifier( - conventionalize_identifier(bundle.lang_config, expression) + conventionalize_identifier(bundle.language, expression) ) elif isinstance(expression, PreparedFunctionCall): expression.arguments = [ prepare_argument(bundle, arg) for arg in expression.arguments ] elif isinstance(expression, FunctionCall): - submission = submission_name(bundle.lang_config) + submission = submission_name(bundle.language) if expression.type == FunctionType.CONSTRUCTOR: - name = conventionalize_class(bundle.lang_config, expression.name) + name = conventionalize_class(bundle.language, expression.name) elif expression.type == FunctionType.PROPERTY: if expression.namespace is None: name = conventionalize_global_identifier( - bundle.lang_config, expression.name + bundle.language, expression.name ) else: - name = conventionalize_property(bundle.lang_config, expression.name) + name = conventionalize_property(bundle.language, expression.name) else: - name = conventionalize_function(bundle.lang_config, expression.name) + name = conventionalize_function(bundle.language, expression.name) if expression.namespace is None: namespace = Identifier(submission) @@ -302,7 +302,7 @@ def _create_handling_function( :param output: The oracle. :return: A tuple containing the call and the name of the oracle if present. """ - lang_config = bundle.lang_config + lang_config = bundle.language if isinstance(output, OracleOutputChannel) and isinstance( output.oracle, LanguageSpecificOracle ): @@ -515,8 +515,7 @@ def exception_file(bundle: Bundle, directory: Path): def prepare_execution_unit( bundle: Bundle, destination: Path, - execution_name: str, - execution_unit: "ExecutionUnit", + execution_unit: "PlannedExecutionUnit", ) -> PreparedExecutionUnit: """ Prepare an execution unit for code generation. @@ -524,7 +523,6 @@ def prepare_execution_unit( :param bundle: The configuration bundle. :param destination: Where the generated files should go. :param execution_unit: The execution for which generation is happening. - :param execution_name: The name of the execution module. :return: The name of the generated file in the given destination and a set of oracle names that will also be needed. 
@@ -532,23 +530,22 @@ def prepare_execution_unit( evaluator_names = set() contexts = [] for context in execution_unit.contexts: - context_args, context_evaluator_names = prepare_context(bundle, context) + context_args, context_evaluator_names = prepare_context(bundle, context.context) contexts.append(context_args) evaluator_names.update(context_evaluator_names) value_file_name = value_file(bundle, destination).name exception_file_name = exception_file(bundle, destination).name - submission = submission_name(bundle.lang_config) + submission = submission_name(bundle.language) return PreparedExecutionUnit( - execution_name=execution_name, value_file=value_file_name, exception_file=exception_file_name, submission_name=submission, testcase_separator_secret=bundle.testcase_separator_secret, context_separator_secret=bundle.context_separator_secret, contexts=contexts, - execution_unit=execution_unit, + unit=execution_unit, evaluator_names=evaluator_names, - language=bundle.lang_config, + language=bundle.language, ) diff --git a/tested/languages/python/generators.py b/tested/languages/python/generators.py index 85a9b137..59b739cd 100644 --- a/tested/languages/python/generators.py +++ b/tested/languages/python/generators.py @@ -190,7 +190,7 @@ def send_specific_exception(exception): ctx: PreparedContext for i, ctx in enumerate(pu.contexts): indent = " " * 4 - result += f"def {pu.execution_name}_context_{i}():\n" + result += f"def {pu.unit.name}_context_{i}():\n" result += indent + ctx.before + "\n" if not ctx.context.has_main_testcase(): @@ -235,7 +235,7 @@ def send_specific_exception(exception): for i, ctx in enumerate(pu.contexts): result += "write_context_separator()\n" - result += f"{pu.execution_name}_context_{i}()\n" + result += f"{pu.unit.name}_context_{i}()\n" result += """ value_file.close() diff --git a/tested/languages/utils.py b/tested/languages/utils.py index d0efd218..cfd608f8 100644 --- a/tested/languages/utils.py +++ b/tested/languages/utils.py @@ -3,7 +3,7 @@ import os import re from pathlib import Path -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Optional, overload from tested.configs import GlobalConfig from tested.datatypes import BasicStringTypes @@ -142,6 +142,20 @@ def modify_line_number(match: re.Match) -> str: return re.sub(code_regex, modify_line_number, stacktrace) +@overload +def convert_stacktrace_to_clickable_feedback( + lang: "Language", stacktrace: str +) -> ExtendedMessage: + ... + + +@overload +def convert_stacktrace_to_clickable_feedback( + lang: "Language", stacktrace: None +) -> None: + ... 
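# --- Editor's note (illustration only; not part of this patch) ---
# The overloads added above give type checkers an exact mapping: a str
# stacktrace produces an ExtendedMessage, a None stacktrace produces None, so
# callers that pass a known str do not need to handle an Optional result.
# The same pattern in miniature, with hypothetical names:
from typing import Optional, overload

@overload
def _shout(text: str) -> str: ...
@overload
def _shout(text: None) -> None: ...

def _shout(text: Optional[str]) -> Optional[str]:
    # Runtime behaviour is defined only by this final definition.
    return text.upper() if text is not None else None
# --- end editor's note ---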
+ + def convert_stacktrace_to_clickable_feedback( lang: "Language", stacktrace: Optional[str] ) -> Optional[ExtendedMessage]: diff --git a/tested/oracles/common.py b/tested/oracles/common.py index 24296b3a..725cf948 100644 --- a/tested/oracles/common.py +++ b/tested/oracles/common.py @@ -90,7 +90,7 @@ def cleanup_specific_programmed( config: OracleConfig, channel: NormalOutputChannel, actual: EvalResult ) -> EvalResult: if isinstance(channel, ExceptionOutputChannel): - lang_config = config.bundle.lang_config + lang_config = config.bundle.language actual.readable_expected = lang_config.cleanup_stacktrace( actual.readable_expected or "" ) diff --git a/tested/oracles/exception.py b/tested/oracles/exception.py index c2ce2253..3c3d094d 100644 --- a/tested/oracles/exception.py +++ b/tested/oracles/exception.py @@ -15,7 +15,7 @@ def try_as_exception(config: OracleConfig, value: str) -> ExceptionValue: actual = get_converter().loads(value, ExceptionValue) - actual.stacktrace = config.bundle.lang_config.cleanup_stacktrace(actual.stacktrace) + actual.stacktrace = config.bundle.language.cleanup_stacktrace(actual.stacktrace) return actual @@ -25,15 +25,13 @@ def try_as_readable_exception( # noinspection PyBroadException try: actual = get_converter().loads(value, ExceptionValue) - actual.stacktrace = config.bundle.lang_config.cleanup_stacktrace( - actual.stacktrace - ) + actual.stacktrace = config.bundle.language.cleanup_stacktrace(actual.stacktrace) except Exception: return None, None else: readable = actual.readable(omit_type=False) message = convert_stacktrace_to_clickable_feedback( - config.bundle.lang_config, actual.stacktrace + config.bundle.language, actual.stacktrace ) return readable, message @@ -105,7 +103,7 @@ def evaluate( # To keep things clean, we only do this if the test is incorrect. cleaned_stacktrace = convert_stacktrace_to_clickable_feedback( - config.bundle.lang_config, actual.stacktrace + config.bundle.language, actual.stacktrace ) if cleaned_stacktrace and status != Status.CORRECT: messages.append(cleaned_stacktrace) diff --git a/tested/oracles/value.py b/tested/oracles/value.py index d95ffdd7..2c5fb9c6 100644 --- a/tested/oracles/value.py +++ b/tested/oracles/value.py @@ -141,7 +141,7 @@ def _check_simple_type( :return: A tuple with the result and expected value, which can have a modified type. """ - supported_types = fallback_type_support_map(bundle.lang_config) + supported_types = fallback_type_support_map(bundle.language) # Case 3. if supported_types[expected.type] == TypeSupport.UNSUPPORTED: diff --git a/tests/test_functionality.py b/tests/test_functionality.py index a7f60c63..0eb57b0d 100644 --- a/tests/test_functionality.py +++ b/tests/test_functionality.py @@ -857,8 +857,7 @@ def test_function_arguments_without_brackets(tmp_path: Path, pytestconfig): result = generate_statement(bundle, statement) assert ( - result - == f'{submission_name(bundle.lang_config)}.test 5.5 :: Double "hallo" True' + result == f'{submission_name(bundle.language)}.test 5.5 :: Double "hallo" True' ) diff --git a/tests/test_serialisation.py b/tests/test_serialisation.py index 54ef94cf..39f3f11f 100644 --- a/tests/test_serialisation.py +++ b/tests/test_serialisation.py @@ -169,15 +169,15 @@ def run_encoder(bundle: Bundle, values: List[Value]) -> List[str]: # Copy dependencies. 
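# --- Editor's note (illustration only; not part of this patch) ---
# The rest of this helper, shown below, now goes through bundle.language for
# every step: it copies the language's initial dependencies into the work
# directory, writes a generated encoder source file, compiles everything, and
# locates the entry point with the new two-value find_main_file. Condensed
# sketch of those steps (names as used in the hunk below):
#
#     copy_from_paths_to_path(bundle.language.path_to_dependencies(),
#                             bundle.language.initial_dependencies(), dest)
#     name = conventionalize_namespace(bundle.language, "encode")
#     encoder_destination = dest / bundle.language.with_extension(name)
#     executable, _ = bundle.language.find_main_file(files, name)
# --- end editor's note ---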
- dependency_paths = bundle.lang_config.path_to_dependencies() - dependencies = bundle.lang_config.initial_dependencies() + dependency_paths = bundle.language.path_to_dependencies() + dependencies = bundle.language.initial_dependencies() dest = bundle.config.workdir copy_from_paths_to_path(dependency_paths, dependencies, dest) - name = conventionalize_namespace(bundle.lang_config, "encode") - encoder_name = bundle.lang_config.with_extension(name) + name = conventionalize_namespace(bundle.language, "encode") + encoder_name = bundle.language.with_extension(name) encoder_destination = dest / encoder_name - encode_code = bundle.lang_config.generate_encoder(values) + encode_code = bundle.language.generate_encoder(values) with open(encoder_destination, "w") as encoder_file: encoder_file.write(encode_code) @@ -189,8 +189,8 @@ def run_encoder(bundle: Bundle, values: List[Value]) -> List[str]: assert e.exit == 0 files = filter_files(files, dest) - files = bundle.lang_config.filter_dependencies(files, name) - executable = bundle.lang_config.find_main_file(files, name, [])[0] + files = bundle.language.filter_dependencies(files, name) + executable, _ = bundle.language.find_main_file(files, name) # Run the code. r = execute_file(bundle, executable.name, dest, None) @@ -211,7 +211,7 @@ def test_basic_types(language, tmp_path: Path, pytestconfig): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) - type_map = fallback_type_support_map(bundle.lang_config) + type_map = fallback_type_support_map(bundle.language) # Create a list of basic types we want to test. types = [v for v in BASIC_VALUES if type_map[v.type] != TypeSupport.UNSUPPORTED] @@ -236,7 +236,7 @@ def test_advanced_types(language, tmp_path: Path, pytestconfig): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) - type_map = fallback_type_support_map(bundle.lang_config) + type_map = fallback_type_support_map(bundle.language) # Create a list of basic types we want to test. # We want to test all supported or reduced types. @@ -278,7 +278,7 @@ def test_special_numbers(language, tmp_path: Path, pytestconfig): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) - type_map = fallback_type_support_map(bundle.lang_config) + type_map = fallback_type_support_map(bundle.language) # Create a list of basic types we want to test. types = [] @@ -319,7 +319,7 @@ def test_valid_type_map(language: str, tmp_path: Path, pytestconfig): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) - type_map = fallback_type_support_map(bundle.lang_config) + type_map = fallback_type_support_map(bundle.language) # Validate basic types. for basic_type in get_args(BasicTypes):
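(Editor's note, illustration only, not part of this patch.) The serialization
tests above all follow the same pattern after the rename: build a bundle, ask
fallback_type_support_map(bundle.language) for the per-type support level, and
keep only the values the language can represent. A minimal sketch of that
filter, assuming BASIC_VALUES and TypeSupport are imported as in the test
module:

    type_map = fallback_type_support_map(bundle.language)
    testable = [v for v in BASIC_VALUES if type_map[v.type] != TypeSupport.UNSUPPORTED]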