Skip to content

Commit

Permalink
minor updates: rename function and show sampled comp ids
Browse files Browse the repository at this point in the history
  • Loading branch information
wenzhe-log10 committed Mar 8, 2024
1 parent 7a31789 commit e8cee6e
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 6 deletions.
5 changes: 3 additions & 2 deletions log10/feedback/_summary_feedback_utils.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@

from magentic import SystemMessage, UserMessage, chatprompt
from magentic.chat_model.openai_chat_model import OpenaiChatModel
from magentic.chatprompt import escape_braces
Expand Down Expand Up @@ -72,12 +73,12 @@
SystemMessage(SUMMARY_SYSTEM_PROMPT),
UserMessage(SUMMARY_USER_MESSAGE),
UserMessage("Examples: \n{examples}\n\nTest: \n{prompt}"),
model=OpenaiChatModel("gpt-3.5-turbo", temperature=0.2),
model=OpenaiChatModel("gpt-4-0125-preview", temperature=0.2),
)
def summary_feedback_llm_call(examples, prompt) -> str: ...


def get_prompt_response(completion: dict) -> dict:
def flatten_messages(completion: dict) -> dict:
request_messages = completion.get("request", {}).get("messages", [])
if len(request_messages) > 1 and request_messages[1].get("content", ""):
prompt = request_messages[1].get("content")
Expand Down
11 changes: 7 additions & 4 deletions log10/feedback/autofeedback.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,10 @@

import click
import openai
from rich.console import Console

from log10.completions.completions import _get_completion
from log10.feedback._summary_feedback_utils import get_prompt_response, summary_feedback_llm_call
from log10.feedback._summary_feedback_utils import flatten_messages, summary_feedback_llm_call
from log10.feedback.feedback import _get_feedback_list
from log10.load import log10, log10_session

Expand Down Expand Up @@ -55,6 +56,7 @@ def _get_examples(self):
"feedback": json.dumps(feedback_values),
}
)
logger.info(f"Sampled completion ids: {[d['completion_id'] for d in few_shot_examples]}")
return few_shot_examples

def predict(self, text: str = None, completion_id: str = None) -> str:
Expand All @@ -64,7 +66,7 @@ def predict(self, text: str = None, completion_id: str = None) -> str:
# Here assumes the completion is a summary: the prompt is the article and the response is the summary
if completion_id and not text:
completion = _get_completion(completion_id)
pr = get_prompt_response(completion.json()["data"])
pr = flatten_messages(completion.json()["data"])
text = json.dumps(pr)

logger.info(f"{text=}")
Expand All @@ -88,14 +90,15 @@ def auto_feedback_icl(task_id: str, content: str, file: str, completion_id: str,
click.echo("Only one of --content, --file, or --completion_id should be provided.")
return

console = Console()
auto_feedback_icl = AutoFeedbackICL(task_id, num_samples=num_samples)
if completion_id:
results = auto_feedback_icl.predict(completion_id=completion_id)
click.echo(results)
console.print_json(results)
return

if file:
with open(file, "r") as f:
content = f.read()
results = auto_feedback_icl.predict(text=content)
click.echo(results)
console.print_json(results)

0 comments on commit e8cee6e

Please sign in to comment.