From e95390efedfecc51db19632711ba672721835f8d Mon Sep 17 00:00:00 2001
From: Vinicius Vaz
Date: Mon, 23 Oct 2023 13:13:48 -0300
Subject: [PATCH] using string types instead of file

---
 config.toml                                          |  2 +-
 .../test_prompt_creator_for_image_generator_piece.py |  8 ++++----
 .../TextGeneratorPiece/test_text_generator_piece.py  | 11 +++--------
 3 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/config.toml b/config.toml
index d6a48ab..8659b5d 100644
--- a/config.toml
+++ b/config.toml
@@ -9,4 +9,4 @@ REPOSITORY_LABEL = "OpenAI Domino Pieces"
 
 # The version of this Pieces release
 # Attention: changing this will create a new release
-VERSION = "0.4.0"
\ No newline at end of file
+VERSION = "0.4.0"
\ No newline at end of file
diff --git a/pieces/PromptCreatorForImageGeneratorPiece/test_prompt_creator_for_image_generator_piece.py b/pieces/PromptCreatorForImageGeneratorPiece/test_prompt_creator_for_image_generator_piece.py
index 907b415..10af27a 100644
--- a/pieces/PromptCreatorForImageGeneratorPiece/test_prompt_creator_for_image_generator_piece.py
+++ b/pieces/PromptCreatorForImageGeneratorPiece/test_prompt_creator_for_image_generator_piece.py
@@ -1,7 +1,7 @@
 from domino.testing import piece_dry_run
 import tiktoken
 import os
-from pathlib import Path
+
 
 
 def run_piece(
@@ -30,12 +30,12 @@ def run_piece(
     }
     )
 
-def test_piece():
+def test_prompt_creator_for_image_generator_piece():
     piece_kwargs = {
         "context": "Explorers dive into a mesmerizing underwater city, discovering ancient secrets, mysterious symbols, and evidence of an advanced civilization.",
         "art_style": "surrealistic oceanic exploration",
         "completion_max_tokens": 350,
-        "output_type": "file",
+        "output_type": "file_and_string",
         "openai_model": "gpt-3.5-turbo",
         "temperature": 0.7,
     }
@@ -47,7 +47,7 @@ def test_piece():
     if piece_kwargs["output_type"] == "file":
         assert output.get("generated_prompt_string") == None
         assert output.get("generated_prompt_file_path").endswith(".txt")
-        generated_prompt_path = Path(output.get("generated_prompt_file_path"))
+        generated_prompt_path = output.get("generated_prompt_file_path")
         with open(generated_prompt_path, "r") as f:
             generated_prompt = f.read()
 
diff --git a/pieces/TextGeneratorPiece/test_text_generator_piece.py b/pieces/TextGeneratorPiece/test_text_generator_piece.py
index 0586776..5002c4c 100644
--- a/pieces/TextGeneratorPiece/test_text_generator_piece.py
+++ b/pieces/TextGeneratorPiece/test_text_generator_piece.py
@@ -3,6 +3,7 @@
 import tiktoken
 import os
 
+
 def run_piece(
     template: str,
     prompt_args: List[dict],
@@ -29,14 +30,14 @@ def run_piece(
     }
     )
 
-def test_piece():
+def test_text_generator_piece():
     template = "tell me about the history of {event_history}"
     prompt_args = [{"arg_name": "event_history", "arg_value": "artifical intelligence"}]
 
     piece_kwargs = {
         "template": template,
         "prompt_args": prompt_args,
-        "output_type": "file",
+        "output_type": "file_and_string",
         "completion_max_tokens": 500,
         "openai_model": "gpt-3.5-turbo",
     }
@@ -48,9 +49,6 @@ def test_piece():
     if piece_kwargs["output_type"] == "file":
         assert output.get("string_generated_text") == None
         assert output.get("file_path_generated_text").endswith(".txt")
-        generated_prompt_path = output.get("file_path_generated_text")
-        with open(generated_prompt_path, "r") as f:
-            generated_prompt = f.read()
     if piece_kwargs["output_type"] == "string":
         assert output.get("string_generated_text") != None and type(output.get("string_generated_text")) == str
         assert output.get("file_path_generated_text") == None
@@ -64,6 +62,3 @@
 
         text_tokens = encoding.encode(text=generated_prompt)
         assert len(text_tokens) <= piece_kwargs["completion_max_tokens"]
-
-if __name__ == "__main__":
-    test_piece()
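
Not part of the patch: with output_type switched to "file_and_string", neither the "file" nor the "string" assertion branch shown above runs, and the generated_prompt consumed by the final token-count check must be defined in context this patch does not touch. A minimal sketch of what such a branch could look like, assuming the piece populates both output fields for "file_and_string" (the branch and its key names are inferred from the existing "file" and "string" branches, not taken from the repository):

    if piece_kwargs["output_type"] == "file_and_string":
        # Hypothetical branch: assumes both outputs are set together.
        assert output.get("file_path_generated_text").endswith(".txt")
        generated_prompt = output.get("string_generated_text")
        assert generated_prompt is not None and type(generated_prompt) == str

        # generated_prompt then feeds the token-count check retained above:
        encoding = tiktoken.encoding_for_model(piece_kwargs["openai_model"])
        text_tokens = encoding.encode(text=generated_prompt)
        assert len(text_tokens) <= piece_kwargs["completion_max_tokens"]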