added max conversation age logic
n3d1117 committed Mar 6, 2023
1 parent 02dfd99 commit 0eeb465
Showing 4 changed files with 25 additions and 2 deletions.
3 changes: 3 additions & 0 deletions .env.example
@@ -13,5 +13,8 @@ SHOW_USAGE=false
# Max number of messages to keep in memory, after which the conversation will be summarised
MAX_HISTORY_SIZE=10

# Max minutes a conversation will live, after which it will be reset to avoid excessive token usage
MAX_CONVERSATION_AGE_MINUTES=180

# Whether to reply to voice messages with just the transcript or with a ChatGPT response to the transcript
VOICE_REPLY_WITH_TRANSCRIPT_ONLY=true
2 changes: 2 additions & 0 deletions README.md
@@ -45,6 +45,7 @@ ALLOWED_TELEGRAM_USER_IDS="USER_ID_1,USER_ID_2,..." # Defaults to "*" (everyone)
PROXY="YOUR_PROXY" # e.g. "http://localhost:8080", defaults to none
SHOW_USAGE=true # Defaults to false
MAX_HISTORY_SIZE=15 # Defaults to 10
MAX_CONVERSATION_AGE_MINUTES=120 # Defaults to 180 (3h)
VOICE_REPLY_WITH_TRANSCRIPT_ONLY=false # Defaults to true
```
* `OPENAI_API_KEY`: Your OpenAI API key, which you can get from [here](https://platform.openai.com/account/api-keys)
@@ -53,6 +54,7 @@ VOICE_REPLY_WITH_TRANSCRIPT_ONLY=false # Defaults to true
* `PROXY`: Proxy to be used for OpenAI and Telegram bot
* `SHOW_USAGE`: Whether to show OpenAI token usage information after each response
* `MAX_HISTORY_SIZE`: Max number of messages to keep in memory, after which the conversation will be summarised to avoid excessive token usage ([#34](https://github.com/n3d1117/chatgpt-telegram-bot/issues/34))
* `MAX_CONVERSATION_AGE_MINUTES`: Maximum number of minutes a conversation should live; after that it is reset to avoid excessive token usage (see the sketch below)
* `VOICE_REPLY_WITH_TRANSCRIPT_ONLY`: Whether to reply to voice messages with the transcript only or with a ChatGPT response to the transcript ([#38](https://github.com/n3d1117/chatgpt-telegram-bot/issues/38))
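As a rough illustration of how the two limits differ (a hypothetical sketch, not the bot's actual code; `should_reset` and `should_summarise` are made-up helper names): exceeding `MAX_HISTORY_SIZE` causes the history to be summarised, while exceeding `MAX_CONVERSATION_AGE_MINUTES` causes the conversation to be reset from scratch.

```python
import datetime

def should_reset(last_updated: datetime.datetime, max_age_minutes: int = 180) -> bool:
    # Hypothetical helper: True when the conversation is older than the age limit.
    age = datetime.datetime.now() - last_updated
    return age > datetime.timedelta(minutes=max_age_minutes)

def should_summarise(history: list, max_history_size: int = 10) -> bool:
    # Hypothetical helper: True when the history has grown past the size limit.
    return len(history) > max_history_size

# A chat last touched 4 hours ago gets reset; one with 12 stored messages gets summarised.
print(should_reset(datetime.datetime.now() - datetime.timedelta(hours=4)))   # True
print(should_summarise([{"role": "user", "content": "hi"}] * 12))            # True
```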

Additional model parameters can be configured from the `main.py` file:
1 change: 1 addition & 0 deletions main.py
@@ -30,6 +30,7 @@ def main():
        'show_usage': os.environ.get('SHOW_USAGE', 'false').lower() == 'true',
        'proxy': os.environ.get('PROXY', None),
        'max_history_size': int(os.environ.get('MAX_HISTORY_SIZE', 10)),
        'max_conversation_age_minutes': int(os.environ.get('MAX_CONVERSATION_AGE_MINUTES', 180)),

        # 'gpt-3.5-turbo' or 'gpt-3.5-turbo-0301'
        'model': 'gpt-3.5-turbo',
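The new entry follows the same pattern as `MAX_HISTORY_SIZE`: the raw environment string is converted with `int()`, and `180` is used as the fallback when the variable is unset. A standalone sketch of that parsing behaviour (not part of the commit):

```python
import os

# With the variable unset, the integer default is returned as-is.
os.environ.pop('MAX_CONVERSATION_AGE_MINUTES', None)
assert int(os.environ.get('MAX_CONVERSATION_AGE_MINUTES', 180)) == 180

# With the variable set (env values are strings), int() converts it.
os.environ['MAX_CONVERSATION_AGE_MINUTES'] = '120'
assert int(os.environ.get('MAX_CONVERSATION_AGE_MINUTES', 180)) == 120
```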
21 changes: 19 additions & 2 deletions openai_helper.py
@@ -1,3 +1,4 @@
import datetime
import logging
import openai

@@ -15,7 +16,8 @@ def __init__(self, config: dict):
        openai.api_key = config['api_key']
        openai.proxy = config['proxy']
        self.config = config
        self.conversations: dict[int: list] = {} # {chat_id: history}
        self.conversations: dict[int: list] = {} # {chat_id: history}
        self.last_updated: dict[int: datetime] = {} # {chat_id: last_update_timestamp}

    def get_chat_response(self, chat_id: int, query: str) -> str:
        """
@@ -25,9 +27,11 @@ def get_chat_response(self, chat_id: int, query: str) -> str:
        :return: The answer from the model
        """
        try:
            if chat_id not in self.conversations:
            if chat_id not in self.conversations or self.__max_age_reached(chat_id):
                self.reset_chat_history(chat_id)

            self.last_updated[chat_id] = datetime.datetime.now()

            # Summarize the chat history if it's too long to avoid excessive token usage
            if len(self.conversations[chat_id]) > self.config['max_history_size']:
                logging.info(f'Chat history for chat ID {chat_id} is too long. Summarising...')
@@ -116,6 +120,19 @@ def reset_chat_history(self, chat_id):
"""
self.conversations[chat_id] = [{"role": "system", "content": self.config['assistant_prompt']}]

    def __max_age_reached(self, chat_id) -> bool:
        """
        Checks if the maximum conversation age has been reached.
        :param chat_id: The chat ID
        :return: A boolean indicating whether the maximum conversation age has been reached
        """
        if chat_id not in self.last_updated:
            return False
        last_updated = self.last_updated[chat_id]
        now = datetime.datetime.now()
        max_age_minutes = self.config['max_conversation_age_minutes']
        return last_updated < now - datetime.timedelta(minutes=max_age_minutes)

    def __add_to_history(self, chat_id, role, content):
        """
        Adds a message to the conversation history.
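To see when the new check fires, here is a small standalone sketch (not part of the commit) that mirrors the same `datetime` comparison used by `__max_age_reached`; `last_updated` and `max_age_minutes` are plain arguments standing in for the helper's per-chat state and config:

```python
import datetime

def max_age_reached(last_updated: datetime.datetime, max_age_minutes: int) -> bool:
    # Expired when the last update is older than max_age_minutes relative to now.
    now = datetime.datetime.now()
    return last_updated < now - datetime.timedelta(minutes=max_age_minutes)

now = datetime.datetime.now()
print(max_age_reached(now - datetime.timedelta(minutes=10), 180))   # False: well within 3 hours
print(max_age_reached(now - datetime.timedelta(minutes=181), 180))  # True: older than 3 hours
```

On the next `get_chat_response` call for that chat, an expired conversation is simply reset via `reset_chat_history` before the query is processed, and its `last_updated` timestamp is refreshed.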
