Skip to content

Commit

Permalink
Increased response timeout for chat function
Browse files Browse the repository at this point in the history
  • Loading branch information
tusharad committed Oct 18, 2024
1 parent 9f7ce81 commit 137b451
Show file tree
Hide file tree
Showing 2 changed files with 32 additions and 27 deletions.
2 changes: 1 addition & 1 deletion ollama-haskell.cabal
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
cabal-version: 3.4
name: ollama-haskell
version: 0.1.0.1
version: 0.1.0.2
synopsis: Ollama Haskell library
-- description:
license: MIT
Expand Down
57 changes: 31 additions & 26 deletions src/Data/Ollama/Chat.hs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ instance FromJSON Role where
parseJSON _ = fail "Invalid Role value"

-- TODO : Add tool_calls parameter

-- | Represents a message within a chat, including its role and content.
data Message = Message
{ role :: Role
Expand All @@ -56,7 +57,7 @@ data Message = Message
-- TODO: Add Options parameter
data ChatOps = ChatOps
{ chatModelName :: Text
-- ^ The name of the chat model to be used.
-- ^ The name of the chat model to be used.
, messages :: NonEmpty Message
-- ^ A non-empty list of messages forming the conversation context.
, tools :: Maybe Text
Expand All @@ -66,7 +67,7 @@ data ChatOps = ChatOps
, stream :: Maybe (ChatResponse -> IO (), IO ())
-- ^ Optional streaming functions where the first handles each chunk of the response, and the second flushes the stream.
, keepAlive :: Maybe Text
-- ^ Optional text to specify keep-alive behavior.
-- ^ Optional text to specify keep-alive behavior.
}

instance Show ChatOps where
Expand Down Expand Up @@ -95,7 +96,7 @@ instance Eq ChatOps where

data ChatResponse = ChatResponse
{ model :: Text
-- ^ The name of the model that generated this response.
-- ^ The name of the model that generated this response.
, createdAt :: UTCTime
-- ^ The timestamp when the response was created.
, message :: Maybe Message
Expand All @@ -113,7 +114,7 @@ data ChatResponse = ChatResponse
, evalCount :: Maybe Int64
-- ^ Optional count of evaluations during the chat process.
, evalDuration :: Maybe Int64
-- ^ Optional duration in milliseconds for evaluations during the chat process.
-- ^ Optional duration in milliseconds for evaluations during the chat process.
}
deriving (Show, Eq)

Expand Down Expand Up @@ -142,14 +143,15 @@ instance FromJSON ChatResponse where
<*> v .:? "eval_count"
<*> v .:? "eval_duration"

-- |
-- A default configuration for initiating a chat with a model.
-- This can be used as a starting point and modified as needed.
--
-- Example:
--
-- > let ops = defaultChatOps { chatModelName = "customModel" }
-- > chat ops
{- |
A default configuration for initiating a chat with a model.
This can be used as a starting point and modified as needed.
Example:
> let ops = defaultChatOps { chatModelName = "customModel" }
> chat ops
-}
defaultChatOps :: ChatOps
defaultChatOps =
ChatOps
Expand All @@ -161,23 +163,26 @@ defaultChatOps =
, keepAlive = Nothing
}

-- |
-- Initiates a chat session with the specified 'ChatOps' configuration and returns either
-- a 'ChatResponse' or an error message.
--
-- This function sends a request to the Ollama chat API with the given options.
--
-- Example:
--
-- > let ops = defaultChatOps
-- > result <- chat ops
-- > case result of
-- > Left errorMsg -> putStrLn ("Error: " ++ errorMsg)
-- > Right response -> print response
{- |
Initiates a chat session with the specified 'ChatOps' configuration and returns either
a 'ChatResponse' or an error message.
This function sends a request to the Ollama chat API with the given options.
Example:
> let ops = defaultChatOps
> result <- chat ops
> case result of
> Left errorMsg -> putStrLn ("Error: " ++ errorMsg)
> Right response -> print response
-}
chat :: ChatOps -> IO (Either String ChatResponse)
chat cOps = do
let url = CU.host defaultOllama
manager <- newManager defaultManagerSettings
manager <-
newManager defaultManagerSettings -- Setting response timeout to 5 minutes, since llm takes time
{ managerResponseTimeout = responseTimeoutMicro (5 * 60 * 1000000)}
initialRequest <- parseRequest $ T.unpack (url <> "/api/chat")
let reqBody = cOps
request =
Expand Down

0 comments on commit 137b451

Please sign in to comment.