
Commit

Add fastchat api server tutorial
ruiyiw committed Oct 30, 2023
2 parents 8e92192 + b6c03cf commit 98d4189
Showing 22 changed files with 202 additions and 116 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -33,4 +33,4 @@ tests/state_of_the_union.txt

# Build
build
!dummy_file
!dummy_file
89 changes: 0 additions & 89 deletions data_process/data/multiturn_data/multiturn_data_preprocess.py

This file was deleted.

74 changes: 59 additions & 15 deletions data_process/redis_data_filtering/prompt_reverse_engineering.py
@@ -2,7 +2,7 @@
import os
from collections import defaultdict
from typing import Any, Dict, List, Tuple, Union, cast

import transformers
import pandas as pd
import rich
from rich.console import Console
@@ -15,33 +15,52 @@
import enum

#PROMPT_PREFIX = "Prompt after formatting:\n"

MAX_TOKEN = 2048
PROMPT_TEMPLATE="""Prompt after formatting:\nImagine you are {agent}, your task is to act/speak as {agent} would, keeping in mind {agent}'s social goal.
You can find {agent}'s background and goal in the 'Here is the context of the interaction' field.
Note that {agent}'s secret and goal is only visible to you.
You should try your best to achieve {agent}'s goal in a way that align with their character traits.
Additionally, maintaining the conversation's naturalness and realism is essential (e.g., do not repeat what other people has already said before).
{history}.
You are at Turn #{turn_number}. Your available action types are
{action_list}.
Note: You can "leave" this conversation if 1. you have achieved your social goals, 2. this conversation makes you uncomfortable, 3. you find it uninteresting/you lose your patience, 4. or for other reasons you want to leave.
Please only generate a JSON string including the action type and the argument.
Your action should follow the given format:
{format_instructions}
"""
You are at Turn #{turn_number}."""

#PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
FORMAT_TEMPLATE = """\nAs an example, for the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}
the object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.
\nHere is the output schema:\n```\n{\"description\": \"An interface for messages.\\nThere is only one required method: to_natural_language\", \"properties\": {\"action_type\": {\"title\": \"Action Type\", \"description\": \"whether to speak at this turn or choose to not do anything\", \"enum\": [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"], \"type\": \"string\"}, \"argument\": {\"title\": \"Argument\", \"description\": \"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\", \"type\": \"string\"}}, \"required\": [\"action_type\", \"argument\"]}\n```\u001b[0m"""


PROMPT_TEMPLATE_W_FORMAT="""Prompt after formatting:\nImagine you are {agent}, your task is to act/speak as {agent} would, keeping in mind {agent}'s social goal.
You can find {agent}'s background and goal in the 'Here is the context of the interaction' field.
Note that {agent}'s secret and goal is only visible to you.
You should try your best to achieve {agent}'s goal in a way that align with their character traits.
Additionally, maintaining the conversation's naturalness and realism is essential (e.g., do not repeat what other people has already said before).
{history}.
You are at Turn #{turn_number}. Your available action types are
"none action speak non-verbal communication leave".
Note: You can "leave" this conversation if 1. you have achieved your social goals, 2. this conversation makes you uncomfortable, 3. you find it uninteresting/you lose your patience, 4. or for other reasons you want to leave.
Please only generate a JSON string including the action type and the argument.
Your action should follow the given format:
\nAs an example, for the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}
the object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.
\nHere is the output schema:\n```\n{\"description\": \"An interface for messages.\\nThere is only one required method: to_natural_language\", \"properties\": {\"action_type\": {\"title\": \"Action Type\", \"description\": \"whether to speak at this turn or choose to not do anything\", \"enum\": [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"], \"type\": \"string\"}, \"argument\": {\"title\": \"Argument\", \"description\": \"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\", \"type\": \"string\"}}, \"required\": [\"action_type\", \"argument\"]}\n```\u001b[0m
"""
# static
ACTION_LIST = "none action speak non-verbal communication leave" #" ".join(ActionType)

ACTION_REVERSE_MAP = {"left ": "leave", 'did n': 'none', 'said:': 'speak'}

MODEL_CHECKPOINT = "meta-llama/Llama-2-13b-chat-hf"
HF_TOKEN = os.environ.get("HF_TOKEN")  # read from the environment; never hardcode access tokens


TOKENIZER = transformers.AutoTokenizer.from_pretrained(
    MODEL_CHECKPOINT,
    padding=False,
    truncation=False,
    token=HF_TOKEN,
)

def to_natural_language(self) -> str:
    match self.action_type:
@@ -101,10 +120,27 @@ def generate_result(msg):

    return str_result

def reverse_episode_log(epilog, later_speak=False):
def surpass_max_token_check(string, max_token=MAX_TOKEN, tokenizer=TOKENIZER):
    prompt_tokens = len(tokenizer(string)['input_ids'])
    return max(prompt_tokens - max_token, 0)

def truncate_prompt_to_length(dia_his, surpass_num, tokenizer=TOKENIZER):
    # context_len = len(tokenizer(context)['input_ids'])
    dia_sen = dia_his.split("\n")
    remove_len = 0
    i = 0
    # drop the oldest utterances until roughly `surpass_num` tokens are removed
    while remove_len < surpass_num and i < len(dia_sen):
        remove_len += len(tokenizer(dia_sen[i])['input_ids'])
        i += 1
    trunc_dia = "\n".join(dia_sen[i:])
    return trunc_dia
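# Usage sketch for the two helpers above (values hypothetical, not part of the pipeline):
#   over = surpass_max_token_check(prompt)                   # tokens beyond MAX_TOKEN, or 0
#   if over > 0:
#       dia_his = truncate_prompt_to_length(dia_his, over)   # drops the oldest lines first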


def reverse_episode_log(epilog, later_speak=False, include_format=False, max_token=MAX_TOKEN):
    episode_msg = epilog.messages
    # per episode
    agent_model = epilog.models[1]
    prompt_template = PROMPT_TEMPLATE_W_FORMAT if include_format else PROMPT_TEMPLATE

    if len(episode_msg) > 0:
        init_loop = episode_msg[0]
@@ -131,23 +167,31 @@ def reverse_episode_log(epilog, later_speak=False):
            dial_history += "\n" + tpl[2]
        else:
            # for the first context, we don't need \n
            dial_history += tpl[2]
            context = tpl[2]
            dial_history += context

        if tpl[0] == speaker:  # if the speaker is the agent, use what they said as the result
            str_result = generate_result(tpl[2])
        # check if this is the end
        if i % 2 == turn_div:
            # take alternating turns: we always want to predict one agent, not both
            next_turn = i
            prompt = PROMPT_TEMPLATE.format(
                agent=speaker, history=dial_history, turn_number=next_turn,
                action_list=ACTION_LIST, format_instructions=FORMAT_TEMPLATE)
            prompt = prompt_template.format(
                agent=speaker, history=dial_history, turn_number=next_turn)
            over_tokens = surpass_max_token_check(prompt)
            if over_tokens > 0:
                # truncate only the dialogue, keeping the scenario context intact
                all_dial = dial_history[len(context):]
                trun_dial = truncate_prompt_to_length(all_dial, over_tokens)
                prompt = prompt_template.format(
                    agent=speaker, history=context + "\n" + trun_dial, turn_number=next_turn)
            turn_dic["prompt"] = prompt
            turn_dic['result'] = str_result
            prompt_result_instances.append(turn_dic)

    return prompt_result_instances


def parse_prompt_to_json(episode, dir, init_speak):
    prompt_result_instances = reverse_episode_log(episode, init_speak)

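Each element that `reverse_episode_log` returns pairs a reconstructed prompt with the agent's gold action. A minimal sketch of the expected shape, with abbreviated, hypothetical values (`epilog` stands for an episode log pulled from Redis):

```python
instances = reverse_episode_log(epilog, include_format=True)
# Each instance is a {"prompt": ..., "result": ...} dict, e.g.:
# {
#     "prompt": "Prompt after formatting:\nImagine you are Ethan, ...",
#     "result": '{"action_type": "speak", "argument": "Hi, nice to meet you!"}',
# }
```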
2 changes: 1 addition & 1 deletion data_process/redis_data_filtering/redis_filtering.py
@@ -95,7 +95,7 @@ def goal_filter_per_env_agent(episodes):
            env_tpls.append((episodes[agent1_rank[i]], 0))
            env_tpls.append((episodes[agent2_rank[i]], 1))
        else:
            if goal_score['agent1'][agent1_rank[i]] >= min(GOAL_KEEP_THRESHOD, agent1_avg) and (goal_score['agent2'][agent2_rank[i]] >= min(KEEP_THRESHOD, agent2_avg)):
            if goal_score['agent1'][agent1_rank[i]] >= min(GOAL_KEEP_THRESHOD, agent1_avg) and (goal_score['agent2'][agent2_rank[i]] >= min(GOAL_KEEP_THRESHOD, agent2_avg)):
                env_tpls.append((episodes[agent1_rank[i]], 0))
                env_tpls.append((episodes[agent2_rank[i]], 1))

2 changes: 2 additions & 0 deletions llm_deploy/README.md
@@ -7,6 +7,7 @@ Go to the vllm dir and pip install -e .
Note https://github.com/vllm-project/vllm/issues/1283: if you run into a CUDA version error, modify the config file to pin the PyTorch version to "== 2.0.1".



## Deploy finetuned model on babel via FastChat API server
### Login with SSH key
1. Add public ed25519 key to server
@@ -161,6 +162,7 @@ If the above command runs successfully, you should be able to use REST API on your localhost.
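For example, a minimal smoke test of the OpenAI-compatible endpoint could look like the sketch below; the port (8000) and the registered model name are assumptions, so match them to the `--port` and `--model-names` you used when launching the server:

```python
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "llama-2-13b-chat-hf",  # hypothetical registered name
        "messages": [{"role": "user", "content": "Say hello."}],
        "temperature": 0.7,
    },
)
print(resp.json()["choices"][0]["message"]["content"])
```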




### Useful resource links for babel
1. https://hpc.lti.cs.cmu.edu/wiki/index.php?title=BABEL#Cluster_Architecture
2. https://hpc.lti.cs.cmu.edu/wiki/index.php?title=VSCode
12 changes: 12 additions & 0 deletions llm_ft/data/create_dummy.py
@@ -0,0 +1,12 @@
import json

dummy_qa = {"id": "", "conversations": [{"from": "human", "value": "How old is Haofei?"}, {"from": "gpt", "value": "He is one year old."}]}

res = []
for i in range(1000):
    # a shallow copy is fine here: only the top-level "id" field is overwritten
    new_qa = dict(dummy_qa)
    new_qa["id"] = f"identity_{i}"
    res.append(new_qa)

with open("./dummy_convs.json", "w") as f:
    json.dump(res, f, indent=4)
28 changes: 28 additions & 0 deletions llm_ft/data/data_filter_out_long.py
@@ -0,0 +1,28 @@
import json
import os

import transformers

INPUT_PATH = "fastchat-ft-gpt4-gpt4-easy-2-side-partial-speak.json"
OUTPUT_PATH = "fastchat-ft-gpt4-gpt4-easy-2-side-partial-speak-no-long.json"
MODEL_CHECKPOINT = "meta-llama/Llama-2-13b-chat-hf"
HF_TOKEN = os.environ.get("HF_TOKEN")  # read from the environment; never commit access tokens

with open(INPUT_PATH, 'r') as f:
    data = json.load(f)

tokenizer = transformers.AutoTokenizer.from_pretrained(
    MODEL_CHECKPOINT,
    padding=False,
    truncation=False,
    token=HF_TOKEN,
)

res = []
for d in data:
    # keep an example only if every human turn fits within the 2048-token budget;
    # note: len() must be taken on input_ids, not on the tokenizer's output dict
    human_turns = [c['value'] for c in d['conversations'] if c['from'] == "human"]
    if all(len(tokenizer(t)['input_ids']) <= 2048 for t in human_turns):
        res.append(d)

with open(OUTPUT_PATH, 'w') as f:
    json.dump(res, f, indent=4)
16 changes: 16 additions & 0 deletions llm_ft/data/data_keep_only_speak.py
@@ -0,0 +1,16 @@
import json

INPUT_PATH = "fastchat-ft-gpt4-gpt4-easy-2-side-partial.json"
OUTPUT_PATH = "fastchat-ft-gpt4-gpt4-easy-2-side-partial-speak.json"

with open(INPUT_PATH, 'r') as f:
    data = json.load(f)

res = []
for d in data:
    # append each example at most once, if any gpt turn is a "speak" action
    if any(conv['from'] == "gpt" and "'action_type': 'speak'" in conv['value'] for conv in d['conversations']):
        res.append(d)

with open(OUTPUT_PATH, 'w') as f:
    json.dump(res, f, indent=4)
8 changes: 7 additions & 1 deletion llm_ft/fastchat/model/model_adapter.py
@@ -60,10 +60,12 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict):
                use_fast=self.use_fast_tokenizer,
                revision=revision,
                trust_remote_code=True,
                token=from_pretrained_kwargs.get("token"),
            )
        except TypeError:
            tokenizer = AutoTokenizer.from_pretrained(
                model_path, use_fast=False, revision=revision, trust_remote_code=True
                model_path, use_fast=False, revision=revision, trust_remote_code=True,
                token=from_pretrained_kwargs.get("token"),
            )
        try:
            model = AutoModelForCausalLM.from_pretrained(
try:
model = AutoModelForCausalLM.from_pretrained(
@@ -154,6 +156,7 @@ def load_model(
    awq_config: Optional[AWQConfig] = None,
    revision: str = "main",
    debug: bool = False,
    hf_access_token: Optional[str] = None,
):
"""Load a model from Hugging Face."""
# get model adapter
@@ -280,6 +283,9 @@

    if dtype is not None:  # Overwrite dtype if it is provided in the arguments.
        kwargs["torch_dtype"] = dtype

    if hf_access_token:
        kwargs["token"] = hf_access_token

    # Load model
    model, tokenizer = adapter.load_model(model_path, kwargs)
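With this change, a gated checkpoint can be loaded by passing a token straight through `load_model`. A minimal sketch, assuming `HF_TOKEN` is set in the environment (the other argument values are illustrative):

```python
import os

from fastchat.model.model_adapter import load_model

# load_model forwards hf_access_token to from_pretrained(..., token=...),
# which gated repos such as meta-llama/Llama-2-13b-chat-hf require.
model, tokenizer = load_model(
    "meta-llama/Llama-2-13b-chat-hf",
    device="cuda",
    num_gpus=1,
    hf_access_token=os.environ.get("HF_TOKEN"),
)
```

The same token reaches interactive use through the new `--hf-access-token` flag added to `llm_ft/fastchat/serve/cli.py` below.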
4 changes: 4 additions & 0 deletions llm_ft/fastchat/serve/cli.py
@@ -236,6 +236,7 @@ def main(args):
        judge_sent_end=args.judge_sent_end,
        debug=args.debug,
        history=not args.no_history,
        hf_access_token=args.hf_access_token,
    )
    except KeyboardInterrupt:
        print("exit...")
@@ -281,5 +282,8 @@ def main(args):
action="store_true",
help="Print useful debug information (e.g., prompts)",
)
parser.add_argument(
"--hf-access-token", type=str, default=None, help="Optional access token for Hugging Face."
)
args = parser.parse_args()
main(args)
2 changes: 2 additions & 0 deletions llm_ft/fastchat/serve/inference.py
@@ -308,6 +308,7 @@ def chat_loop(
    judge_sent_end: bool = True,
    debug: bool = True,
    history: bool = True,
    hf_access_token: Optional[str] = None,
):
    # Model
    model, tokenizer = load_model(
@@ -322,6 +323,7 @@
        awq_config=awq_config,
        revision=revision,
        debug=debug,
        hf_access_token=hf_access_token,
    )
    generate_stream_func = get_generate_stream_function(model, model_path)

