
Commit

Single quotes
nqn committed Jul 24, 2023
1 parent f99c4ee commit 254d790
Showing 35 changed files with 631 additions and 711 deletions.
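Note on the change itself: Python treats single- and double-quoted string literals identically, so this quote swap is purely stylistic and does not alter runtime behavior. A minimal check, using literals that appear in the diffs below:

# Single- and double-quoted literals compare equal, so the swap is behavior-preserving.
assert 'gpt-3.5-turbo-16k' == "gpt-3.5-turbo-16k"
assert {'model': 'claude-1'} == {"model": "claude-1"}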
20 changes: 10 additions & 10 deletions examples/agents/biochemist.py
@@ -11,33 +11,33 @@
 
 # Select one of OpenAI or Anthropic models
 # model = "noop"
-model = "gpt-3.5-turbo-16k"
+model = 'gpt-3.5-turbo-16k'
 # model = "claude-1"
 max_turns = 30
 
 llm = None
 summary_model = None
-if "claude" in model:
+if 'claude' in model:
     import anthropic
 
     log10(anthropic)
-    summary_model = "claude-1-100k"
-    llm = Anthropic({"model": model})
-elif model == "noop":
+    summary_model = 'claude-1-100k'
+    llm = Anthropic({'model': model})
+elif model == 'noop':
     summary_model = model
     llm = NoopLLM()
 else:
     import openai
 
     log10(openai)
-    summary_model = "gpt-3.5-turbo-16k"
-    llm = OpenAI({"model": model})
+    summary_model = 'gpt-3.5-turbo-16k'
+    llm = OpenAI({'model': model})
 
 # example calls from playground (select 1)
 camel_agent(
-    user_role="Poor PhD Student",
-    assistant_role="Experienced Computational Chemist",
-    task_prompt="Perform a molecular dynamics solution of a molecule: CN1CCC[C@H]1c2cccnc2. Design and conduct a 100 ns molecular dynamics simulation of the molecule CN1CCC[C@H]1c2cccnc2 in an explicit solvent environment using the CHARMM force field and analyze the conformational changes and hydrogen bonding patterns over time",
+    user_role='Poor PhD Student',
+    assistant_role='Experienced Computational Chemist',
+    task_prompt='Perform a molecular dynamics solution of a molecule: CN1CCC[C@H]1c2cccnc2. Design and conduct a 100 ns molecular dynamics simulation of the molecule CN1CCC[C@H]1c2cccnc2 in an explicit solvent environment using the CHARMM force field and analyze the conformational changes and hydrogen bonding patterns over time',
     summary_model=summary_model,
     max_turns=max_turns,
     llm=llm,
31 changes: 15 additions & 16 deletions examples/agents/code_optimizer.py
@@ -8,38 +8,37 @@
 from log10.tools import code_extractor
 
 # Select one of OpenAI or Anthropic models
-model = "gpt-3.5-turbo-16k"
+model = 'gpt-3.5-turbo-16k'
 # model = "claude-1"
 # model = "noop"
 max_turns = 10
 
 llm = None
 summary_model = None
 extraction_model = None
-if "claude" in model:
+if 'claude' in model:
     import anthropic
 
     log10(anthropic)
-    summary_model = "claude-1-100k"
-    extraction_model = "claude-1-100k"
-    llm = Anthropic({"model": model})
-elif model == "noop":
+    summary_model = 'claude-1-100k'
+    extraction_model = 'claude-1-100k'
+    llm = Anthropic({'model': model})
+elif model == 'noop':
     summary_model = model
     extraction_model = model
     llm = NoopLLM()
 else:
     import openai
 
     log10(openai)
-    summary_model = "gpt-3.5-turbo-16k"
-    extraction_model = "gpt-4"
-    llm = OpenAI({"model": model})
-
+    summary_model = 'gpt-3.5-turbo-16k'
+    extraction_model = 'gpt-4'
+    llm = OpenAI({'model': model})
 
 # example calls from playground (select 1)
 user_messages, assistant_messages = camel_agent(
-    user_role="C developer",
-    assistant_role="Cybersecurity expert",
+    user_role='C developer',
+    assistant_role='Cybersecurity expert',
     task_prompt='Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf("Enter password: ");\n scanf("%s", password);\n\n if (strcmp(password, "password") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf("Access granted.\\n");\n } else {\n printf("Access denied.\\n");\n }\n\n return 0;\n}',
     summary_model=summary_model,
     max_turns=max_turns,
@@ -49,13 +48,13 @@
 full_response = assistant_messages[-1].content
 
 # Next extract just the C code
-code = code_extractor(full_response, "C", extraction_model, llm=llm)
-print(f"Extracted code\n###\n{code}")
+code = code_extractor(full_response, 'C', extraction_model, llm=llm)
+print(f'Extracted code\n###\n{code}')
 
 # Evaluate if the code compiles
 result = compile(code)
 if result is True:
-    print("Compilation successful")
+    print('Compilation successful')
 else:
-    print("Compilation failed with error:")
+    print('Compilation failed with error:')
     print(result[1])
20 changes: 10 additions & 10 deletions examples/agents/coder.py
@@ -10,34 +10,34 @@
 load_dotenv()
 
 # Select one of OpenAI or Anthropic models
-model = "gpt-3.5-turbo-16k"
+model = 'gpt-3.5-turbo-16k'
 # model = "claude-1"
 # model = "noop"
 max_turns = 30
 
 llm = None
 summary_model = None
-if "claude" in model:
+if 'claude' in model:
     import anthropic
 
     log10(anthropic)
-    summary_model = "claude-1-100k"
-    llm = Anthropic({"model": model})
-elif model == "noop":
+    summary_model = 'claude-1-100k'
+    llm = Anthropic({'model': model})
+elif model == 'noop':
     summary_model = model
     llm = NoopLLM()
 else:
     import openai
 
     log10(openai)
-    summary_model = "gpt-3.5-turbo-16k"
-    llm = OpenAI({"model": model})
+    summary_model = 'gpt-3.5-turbo-16k'
+    llm = OpenAI({'model': model})
 
 # example calls from playground (select 1)
 camel_agent(
-    user_role="Stock Trader",
-    assistant_role="Python Programmer",
-    task_prompt="Develop a trading bot for the stock market",
+    user_role='Stock Trader',
+    assistant_role='Python Programmer',
+    task_prompt='Develop a trading bot for the stock market',
     summary_model=summary_model,
     max_turns=max_turns,
     llm=llm,
18 changes: 9 additions & 9 deletions examples/agents/cybersecurity_expert.py
@@ -10,33 +10,33 @@
 load_dotenv()
 
 # Select one of OpenAI or Anthropic models
-model = "gpt-3.5-turbo-16k"
+model = 'gpt-3.5-turbo-16k'
 # model = "claude-1"
 # model = "noop"
 max_turns = 30
 
 llm = None
 summary_model = None
-if "claude" in model:
+if 'claude' in model:
     import anthropic
 
     log10(anthropic)
-    summary_model = "claude-1-100k"
-    llm = Anthropic({"model": model})
-elif model == "noop":
+    summary_model = 'claude-1-100k'
+    llm = Anthropic({'model': model})
+elif model == 'noop':
     summary_model = model
     llm = NoopLLM()
 else:
     import openai
 
     log10(openai)
-    summary_model = "gpt-3.5-turbo-16k"
-    llm = OpenAI({"model": model})
+    summary_model = 'gpt-3.5-turbo-16k'
+    llm = OpenAI({'model': model})
 
 # example calls from playground (select 1)
 camel_agent(
-    user_role="C developer",
-    assistant_role="Cybersecurity expert",
+    user_role='C developer',
+    assistant_role='Cybersecurity expert',
     task_prompt='Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf("Enter password: ");\n scanf("%s", password);\n\n if (strcmp(password, "password") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf("Access granted.\\n");\n } else {\n printf("Access denied.\\n");\n }\n\n return 0;\n}',
     summary_model=summary_model,
     max_turns=max_turns,
20 changes: 10 additions & 10 deletions examples/agents/email_generator.py
@@ -10,34 +10,34 @@
 load_dotenv()
 
 # Select one of OpenAI or Anthropic models
-model = "gpt-3.5-turbo-16k"
+model = 'gpt-3.5-turbo-16k'
 # model = "claude-1"
 # model = "noop"
 max_turns = 30
 
 llm = None
 summary_model = None
-if "claude" in model:
+if 'claude' in model:
     import anthropic
 
     log10(anthropic)
-    summary_model = "claude-1-100k"
-    llm = Anthropic({"model": model})
-elif model == "noop":
+    summary_model = 'claude-1-100k'
+    llm = Anthropic({'model': model})
+elif model == 'noop':
     summary_model = model
     llm = NoopLLM()
 else:
     import openai
 
     log10(openai)
-    summary_model = "gpt-3.5-turbo-16k"
-    llm = OpenAI({"model": model})
+    summary_model = 'gpt-3.5-turbo-16k'
+    llm = OpenAI({'model': model})
 
 # example calls from playground (select 1)
 camel_agent(
-    user_role="Sales email copyeditor",
-    assistant_role="Sales email copywriter",
-    task_prompt="Write a sales email to Pfizer about a new healthcare CRM",
+    user_role='Sales email copyeditor',
+    assistant_role='Sales email copywriter',
+    task_prompt='Write a sales email to Pfizer about a new healthcare CRM',
     summary_model=summary_model,
     max_turns=max_turns,
     llm=llm,
13 changes: 6 additions & 7 deletions examples/agents/scrape_summarizer.py
@@ -5,25 +5,24 @@
 from log10.load import log10
 from log10.openai import OpenAI
 
-
 # Select one of OpenAI or Anthropic models
-model = "gpt-3.5-turbo-16k"
+model = 'gpt-3.5-turbo-16k'
 # model = "claude-1"
 # model = "noop"
 
 llm = None
-if "claude" in model:
+if 'claude' in model:
     import anthropic
 
     log10(anthropic)
-    llm = Anthropic({"model": model})
-elif model == "noop":
+    llm = Anthropic({'model': model})
+elif model == 'noop':
     llm = NoopLLM()
 else:
     import openai
 
     log10(openai)
-    llm = OpenAI({"model": model})
+    llm = OpenAI({'model': model})
 
-url = "https://nytimes.com"
+url = 'https://nytimes.com'
 print(scrape_summarizer(url, llm))
20 changes: 10 additions & 10 deletions examples/agents/translator.py
@@ -10,35 +10,35 @@
 load_dotenv()
 
 # Select one of OpenAI or Anthropic models
-model = "gpt-3.5-turbo-16k"
+model = 'gpt-3.5-turbo-16k'
 # model = "claude-1"
 # model = "noop"
 
 max_turns = 30
 
 llm = None
 summary_model = None
-if "claude" in model:
+if 'claude' in model:
     import anthropic
 
     log10(anthropic)
-    summary_model = "claude-1-100k"
-    llm = Anthropic({"model": model})
-elif model == "noop":
+    summary_model = 'claude-1-100k'
+    llm = Anthropic({'model': model})
+elif model == 'noop':
     summary_model = model
     llm = NoopLLM()
 else:
     import openai
 
     log10(openai)
-    summary_model = "gpt-3.5-turbo-16k"
-    llm = OpenAI({"model": model})
+    summary_model = 'gpt-3.5-turbo-16k'
+    llm = OpenAI({'model': model})
 
 # example calls from playground (select 1)
 camel_agent(
-    user_role="Web3 guru",
-    assistant_role="Hindi translator",
-    task_prompt="Write a blog post about web3 in Hindi",
+    user_role='Web3 guru',
+    assistant_role='Hindi translator',
+    task_prompt='Write a blog post about web3 in Hindi',
     summary_model=summary_model,
     max_turns=max_turns,
     llm=llm,
34 changes: 14 additions & 20 deletions examples/evals/basic_eval.py
@@ -4,49 +4,43 @@
 from log10.openai import OpenAI
 
 # Choose provider
-provider = "openai" # "anthropic"
+provider = 'openai' # "anthropic"
 
 # TODO: Replace with LLM abstraction.
 llm = None
-if provider == "openai":
+if provider == 'openai':
     llm = OpenAI(
         {
-            "model": "gpt-3.5-turbo",
-            "temperature": 0,
-            "max_tokens": 1024,
-            "top_p": 1,
-            "frequency_penalty": 0,
-            "presence_penalty": 0,
-        }
-    )
-elif provider == "anthropic":
-    llm = Anthropic(
-        {
-            "model": "claude-1",
-            "temperature": 0,
-            "max_tokens_to_sample": 1024,
+            'model': 'gpt-3.5-turbo',
+            'temperature': 0,
+            'max_tokens': 1024,
+            'top_p': 1,
+            'frequency_penalty': 0,
+            'presence_penalty': 0,
         }
     )
+elif provider == 'anthropic':
+    llm = Anthropic({'model': 'claude-1', 'temperature': 0, 'max_tokens_to_sample': 1024,})
 else:
     print(
         f"Unsupported provider option: {provider}. Supported providers are 'openai' or 'anthropic'."
     )
 
 # Ground truth dataset to use for evaluation
 eval_dataset = (
-    "match_data.csv",
-    {"input": "my_input_column", "ideal": "my_output_column"},
+    'match_data.csv',
+    {'input': 'my_input_column', 'ideal': 'my_output_column'},
 )
 
 # Specify which metrics to use. Options are:
 # 'match': model_output.startswith(ideal)
 # 'includes': ideal.lower() in model_output.lower()
 # 'fuzzy_match': similar to includes but remove punctuation, articles and extra whitespace and compare both ways
-eval_metric = "includes"
+eval_metric = 'includes'
 
 # Path to output file to store the metrics
 # Example from: https://github.com/openai/evals/blob/a24f20a357ecb3cc5eec8323097aeade9585796c/evals/elsuite/test/match.py
-out_file_path = "match_output.csv"
+out_file_path = 'match_output.csv'
 
 # Get back and id and url for the summary of results and status
 # todo: get back path to logfile; eval_id, eval_url =
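For anyone who wants to check a single-quote convention like this one mechanically, the standalone sketch below uses Python's built-in tokenize module to flag double-quoted string literals. It is an illustration only, not part of this commit, and the script name is hypothetical:

# check_quotes.py -- illustration only, not part of this commit.
# Flags double-quoted string literals (including docstrings) in the given files.
# Comments are COMMENT tokens, so commented-out lines like `# model = "claude-1"` are ignored.
import sys
import tokenize


def find_double_quoted(path):
    hits = []
    with open(path) as f:
        for tok in tokenize.generate_tokens(f.readline):
            # Strip string prefixes (r, b, f, u) before checking the opening quote character.
            if tok.type == tokenize.STRING and tok.string.lstrip('rbfuRBFU').startswith('"'):
                hits.append((tok.start[0], tok.string))
    return hits


if __name__ == '__main__':
    for path in sys.argv[1:]:
        for lineno, literal in find_double_quoted(path):
            print(f'{path}:{lineno}: {literal}')

Run on the example files before and after this commit, it should show most flagged literals disappearing; a few double-quoted strings remain on purpose, such as the f-string in basic_eval.py that itself contains single quotes.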

0 comments on commit 254d790
