Skip to content

Commit

Permalink
Add deploy and eval code in pipelines
Browse files Browse the repository at this point in the history
  • Loading branch information
ruiyiw committed Dec 6, 2023
1 parent 31694c4 commit 00c2a0f
Show file tree
Hide file tree
Showing 6 changed files with 132 additions and 3 deletions.
10 changes: 9 additions & 1 deletion llm_self_train/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,12 @@ wandb_token: 99caa13ec9552adf0e92e5c30021307ce3cf7fa4

#gcp
oauth2_token_location: ./resources/auth_token.key
bucket_name: pipeline-test-storage
bucket_name: pipeline-test-storage

#deploy and eval
# Port the FastChat OpenAI-compatible API server listens on (see deploy.sh).
deploy_port: 8001
# Public IP used when the eval client runs on a different machine than the
# deployed model server (see pipelines/do_eval.py).
deploy_external_ip: 34.135.182.251
# When True, eval reaches the deployed server via localhost instead of the
# external IP above.
deploy_under_same_server: True
# Model name substituted into --gin.AGENT2_MODEL for evaluation runs.
eval_model_name: "custom_model"
# API key sent to the custom OpenAI-compatible endpoint (FastChat accepts EMPTY).
custom_openai_key: EMPTY
# Value substituted into --gin.BATCH_SIZE in eval.sh.
eval_batch_size: 2
13 changes: 13 additions & 0 deletions llm_self_train/deploy.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/bin/bash

# Deploys a FastChat-based, OpenAI-compatible serving stack.  Each service is
# backgrounded and its PID recorded under tmp/ so that the Python pipeline
# (pipelines/do_deploy.py) can terminate the stack before redeploying.

# Make sure the PID directory exists before writing into it.
mkdir -p tmp

# Starting the controller
python3 -m fastchat.serve.controller &
echo $! > tmp/controller.pid

# Starting the model worker with the specified model path
# (--model-path is rewritten by do_deploy.overwrite_deploy_bash)
python3 -m fastchat.serve.model_worker --model-path Mistral-7B-Instruct-v0.1 &
echo $! > tmp/model_worker.pid

# Starting the OpenAI API server on host 0.0.0.0 and port 8001
# (--port is rewritten by do_deploy.overwrite_deploy_bash).
# BUG FIX: this line previously lacked "&", so the script blocked here and
# the PID written below was the model worker's, not the API server's.
python3 -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 8001 &
echo $! > tmp/openai_api_server.pid
15 changes: 15 additions & 0 deletions llm_self_train/eval.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Evaluation launcher: the --gin.* values below are placeholders that
# pipelines/do_eval.py rewrites before each run.
# SECURITY: a live OpenAI API key was previously committed on the
# CUSTOM_OPENAI_KEY line; it must be revoked.  Use "EMPTY" (the FastChat
# convention for keyless local servers) or inject a real key at runtime.
python examples/experiment_eval.py \
--gin_file sotopia_conf/generation_utils_conf/generate.gin \
--gin_file sotopia_conf/server_conf/server.gin \
--gin_file sotopia_conf/run_async_server_in_batch.gin \
'--gin.ENV_IDS=["01H7VFHNV13MHN97GAH73E3KM8"]' \
'--gin.AGENT1_MODEL="gpt-3.5-turbo"' \
'--gin.AGENT2_MODEL="custom_model"' \
'--gin.CUSTOM_MODEL_NAME="gpt-3.5-turbo"' \
'--gin.CUSTOM_OPENAI_KEY="EMPTY"' \
'--gin.CUSTOM_OPENAI_API_BASE="https://api.openai.com/v1/models"' \
'--gin.MAX_RETRIES=7' \
'--gin.BATCH_SIZE=2' \
'--gin.TAG="test-gin-file"' \
'--gin.PUSH_TO_DB=False' \
'--gin.TAG_TO_CHECK_EXISTING_EPISODES="test-gin-file"'
1 change: 0 additions & 1 deletion llm_self_train/pipelines/deploy_model.py

This file was deleted.

56 changes: 56 additions & 0 deletions llm_self_train/pipelines/do_deploy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
import yaml
import re
import glob
import os
import signal
import subprocess

# Load the shared pipeline configuration once at import time.  The relative
# path assumes the process CWD is the llm_self_train/pipelines directory —
# TODO confirm against how main.py launches these modules.
with open("../config.yml", 'r') as f:
    config = yaml.safe_load(f)


def overwrite_deploy_bash(deploy_model_name, deploy_port):
    """Rewrite ../deploy.sh in place so the FastChat worker serves
    *deploy_model_name* and the OpenAI-compatible server listens on
    *deploy_port*.  All other text in the script is left untouched.
    """
    with open("../deploy.sh", 'r') as f:
        script_lines = f.readlines()

    def _rewrite(line):
        # Swap only the argument values; the rest of the line is preserved.
        if "--model-path" in line:
            line = re.sub(r'--model-path \S+',
                          f'--model-path {deploy_model_name}', line)
        if "--port" in line:
            line = re.sub(r'--port \d+', f'--port {deploy_port}', line)
        return line

    with open("../deploy.sh", 'w') as f:
        f.writelines(_rewrite(line) for line in script_lines)


def terminate_deploy_processes():
    """Kill every previously deployed service recorded under ../tmp/*.pid.

    deploy.sh writes one PID file per background service; this reads each
    file, SIGKILLs the recorded PID, and deletes the file.  A PID whose
    process is already gone still has its stale file removed.
    """
    pattern = os.path.join("../tmp/", "*.pid")
    pid_files = glob.glob(pattern)
    if len(pid_files) == 0:
        print("No process on deploy. Continue...")
    else:
        print("Terminating processes on deploy...")
        for pid_file in pid_files:
            # BUG FIX: the old pid_file.split(".")[0] returned "" for paths
            # like "../tmp/controller.pid" (the leading ".." contains dots);
            # derive the service name from the basename instead.
            service_name = os.path.splitext(os.path.basename(pid_file))[0]
            try:
                with open(pid_file, 'r') as f:
                    pid = int(f.read().strip())
                os.kill(pid, signal.SIGKILL)
                print(f"Terminated {service_name} pid.")
            except ProcessLookupError:
                print(f"No process found with PID read from {pid_file}")
            finally:
                # Always delete the PID file — previously a dead PID left a
                # stale file behind, which would be re-killed next deploy.
                os.remove(pid_file)


def run_deploy_bash():  # this command should be called under main.py
    """Launch deploy.sh; assumes the CWD is the llm_self_train directory.

    BUG FIX: subprocess.run was given the whole command as one string
    without shell=True, which raises FileNotFoundError on POSIX (there is
    no executable literally named "bash deploy.sh").  Pass the argument
    vector as a list instead (also avoids shell-injection pitfalls).
    """
    subprocess.run(["bash", "deploy.sh"])


def deploy(deploy_model_name):
    """Tear down any running deployment, then deploy *deploy_model_name*
    on the port configured in config.yml.
    """
    # Stop services from any previous deployment first.
    terminate_deploy_processes()
    port = config["deploy_port"]
    overwrite_deploy_bash(deploy_model_name=deploy_model_name,
                          deploy_port=port)
    run_deploy_bash()
40 changes: 39 additions & 1 deletion llm_self_train/pipelines/do_eval.py
Original file line number Diff line number Diff line change
@@ -1 +1,39 @@
from pipelines import config
import yaml
import re
import subprocess

# Load the shared pipeline configuration once at import time.  The relative
# path assumes the process CWD is the llm_self_train/pipelines directory —
# TODO confirm against how main.py launches these modules.
with open("../config.yml", 'r') as f:
    config = yaml.safe_load(f)


def overwrite_eval_bash(changes: dict):
    """Rewrite the --gin.* arguments of ../eval.sh in place.

    For every (key, value) in *changes*, any line mentioning the key has
    the value of its ``--gin.<key>=...`` argument replaced by ``value``
    (substituted verbatim).

    BUG FIXES:
    - The target script is eval.sh (the file run_eval_bash executes);
      there is no eval.bash, so every call raised FileNotFoundError.
    - The old pattern ``(--gin\\.KEY=).*`` consumed the rest of the line,
      destroying the closing single quote and the trailing ``\\`` line
      continuation, corrupting the bash script.  The replacement now stops
      at the closing quote.
    """
    with open("../eval.sh", 'r') as file:
        lines = file.readlines()

    with open("../eval.sh", 'w') as file:
        for line in lines:
            for key, value in changes.items():
                if key in line:
                    # Replace only up to the closing single quote so the
                    # quoting and "\" continuation survive.  A callable
                    # replacement avoids backslash-escaping issues in value.
                    line = re.sub(rf"(--gin\.{re.escape(key)}=)[^']*",
                                  lambda m: m.group(1) + str(value),
                                  line)
            file.write(line)


def run_eval_bash():  # this command should be called under main.py
    """Launch eval.sh; assumes the CWD is the llm_self_train directory.

    BUG FIX: subprocess.run was given the whole command as one string
    without shell=True, which raises FileNotFoundError on POSIX.  Pass the
    argument vector as a list instead.
    """
    subprocess.run(["bash", "eval.sh"])


def eval(env_ids, eval_tag, deploy_model_name):
    """Run the sotopia evaluation of *deploy_model_name* against the
    deployed server, tagging episodes with *eval_tag*.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged
    for backward compatibility with existing callers.
    NOTE(review): values are substituted into eval.sh verbatim, so gin
    string parameters (model names, tags) and *env_ids* must already be
    formatted as gin literals (e.g. ``'["id1"]'``) — confirm with callers.
    """
    # Talk to the model server via localhost when it shares this machine,
    # otherwise through its configured external IP.
    ip = "localhost" if config["deploy_under_same_server"] else config["deploy_external_ip"]
    changes = {
        # BUG FIX: the key was "ENV_IDs"; the case-sensitive match against
        # the --gin.ENV_IDS= line in eval.sh never fired.
        "ENV_IDS": env_ids,
        "AGENT2_MODEL": config["eval_model_name"],
        "CUSTOM_MODEL_NAME": deploy_model_name,
        "CUSTOM_OPENAI_API_BASE": f'http://{ip}:{config["deploy_port"]}/v1',
        "TAG": eval_tag,
        "TAG_TO_CHECK_EXISTING_EPISODES": eval_tag,
        "BATCH_SIZE": config["eval_batch_size"]
    }
    overwrite_eval_bash(changes)
    run_eval_bash()

0 comments on commit 00c2a0f

Please sign in to comment.