From 693e9187e63de8cb8b604c17c2e2712e916e3379 Mon Sep 17 00:00:00 2001
From: Victor
Date: Thu, 12 Dec 2024 15:40:30 -0800
Subject: [PATCH 1/4] Fix content-part encoding and decoding for Google API. (#212)

* Make JsonProcessor process ContentPart properly

* Explicitly remove ```json ```

* Add a failing test for #209

* Pass the tests for #209

* Fix JsonProcessor content processing when regex is present

* Add live google ai call tests for messages with image parts
---
 lib/chat_models/chat_google_ai.ex             | 53 +++++++++++-
 lib/message_processors/json_processor.ex      | 13 ++-
 ...context-specific-image-descriptions.livemd |  8 +-
 test/chat_models/chat_google_ai_test.exs      | 86 ++++++++++++++++++-
 4 files changed, 146 insertions(+), 14 deletions(-)

diff --git a/lib/chat_models/chat_google_ai.ex b/lib/chat_models/chat_google_ai.ex
index 43b47a4..bb5678d 100644
--- a/lib/chat_models/chat_google_ai.ex
+++ b/lib/chat_models/chat_google_ai.ex
@@ -211,17 +211,60 @@ defmodule LangChain.ChatModels.ChatGoogleAI do
     }
   end
 
-  def for_api(%Message{} = message) do
+  def for_api(%Message{content: content} = message) when is_binary(content) do
     %{
       "role" => map_role(message.role),
       "parts" => [%{"text" => message.content}]
     }
   end
 
+  def for_api(%Message{content: content} = message) when is_list(content) do
+    %{
+      "role" => map_role(message.role),
+      "parts" => Enum.map(content, &for_api/1)
+    }
+  end
+
   def for_api(%ContentPart{type: :text} = part) do
     %{"text" => part.content}
   end
 
+  # Supported image types: png, jpeg, webp, heic, heif: https://ai.google.dev/gemini-api/docs/vision?lang=rest#technical-details-image
+  def for_api(%ContentPart{type: :image} = part) do
+    mime_type =
+      case Keyword.get(part.options || [], :media, nil) do
+        :png ->
+          "image/png"
+
+        type when type in [:jpeg, :jpg] ->
+          "image/jpeg"
+
+        :webp ->
+          "image/webp"
+
+        :heic ->
+          "image/heic"
+
+        :heif ->
+          "image/heif"
+
+        type when is_binary(type) ->
+          type
+
+        other ->
+          message = "Received unsupported media type for ContentPart: #{inspect(other)}"
+          Logger.error(message)
+          raise LangChainError, message
+      end
+
+    %{
+      "inline_data" => %{
+        "mime_type" => mime_type,
+        "data" => part.content
+      }
+    }
+  end
+
   def for_api(%ToolCall{} = call) do
     %{
       "functionCall" => %{
@@ -598,12 +641,16 @@ defmodule LangChain.ChatModels.ChatGoogleAI do
   def do_process_response(_model, {:error, %Jason.DecodeError{} = response}, _) do
     error_message = "Received invalid JSON: #{inspect(response)}"
     Logger.error(error_message)
-    {:error, LangChainError.exception(type: "invalid_json", message: error_message, original: response)}
+
+    {:error,
+     LangChainError.exception(type: "invalid_json", message: error_message, original: response)}
   end
 
   def do_process_response(_model, other, _) do
     Logger.error("Trying to process an unexpected response. #{inspect(other)}")
-    {:error, LangChainError.exception(type: "unexpected_response", message: "Unexpected response")}
+
+    {:error,
+     LangChainError.exception(type: "unexpected_response", message: "Unexpected response")}
   end
 
   @doc false
diff --git a/lib/message_processors/json_processor.ex b/lib/message_processors/json_processor.ex
index 12117a5..16e09f9 100644
--- a/lib/message_processors/json_processor.ex
+++ b/lib/message_processors/json_processor.ex
@@ -110,7 +110,7 @@ defmodule LangChain.MessageProcessors.JsonProcessor do
 
   @spec run(LLMChain.t(), Message.t()) :: {:cont, Message.t()} | {:halt, Message.t()}
   def run(%LLMChain{} = chain, %Message{} = message) do
-    case Jason.decode(message.processed_content) do
+    case Jason.decode(content_to_string(message.processed_content)) do
       {:ok, parsed} ->
         if chain.verbose, do: IO.puts("Parsed JSON text to a map")
         {:cont, %Message{message | processed_content: parsed}}
@@ -122,7 +122,9 @@ defmodule LangChain.MessageProcessors.JsonProcessor do
   end
 
   def run(%LLMChain{} = chain, %Message{} = message, regex_pattern) do
-    case Regex.run(regex_pattern, message.processed_content, capture: :all_but_first) do
+    case Regex.run(regex_pattern, content_to_string(message.processed_content),
+           capture: :all_but_first
+         ) do
       [json] ->
         if chain.verbose, do: IO.puts("Extracted JSON text from message")
         # run recursive call on just the extracted JSON
@@ -132,4 +134,11 @@ defmodule LangChain.MessageProcessors.JsonProcessor do
       {:halt, Message.new_user!("ERROR: No JSON found")}
     end
   end
+
+  defp content_to_string([
+         %LangChain.Message.ContentPart{type: :text, content: content}
+       ]),
+       do: content
+
+  defp content_to_string(content), do: content
 end
diff --git a/notebooks/context-specific-image-descriptions.livemd b/notebooks/context-specific-image-descriptions.livemd
index ea83157..3967ab6 100644
--- a/notebooks/context-specific-image-descriptions.livemd
+++ b/notebooks/context-specific-image-descriptions.livemd
@@ -4,7 +4,7 @@
 
 ```elixir
 Mix.install([
-  {:langchain, "~> 0.3.0-rc.0"},
+  {:langchain, github: "brainlid/langchain"},
   {:kino, "~> 0.12.0"}
 ])
 ```
@@ -181,7 +181,7 @@ image_data_from_other_system = "image of urban art mural on underpass at 507 Kin
   %{llm: openai_chat_model, verbose: true}
   |> LLMChain.new!()
   |> LLMChain.apply_prompt_templates(messages, %{extra_image_info: image_data_from_other_system})
-  |> LLMChain.message_processors([JsonProcessor.new!()])
+  |> LLMChain.message_processors([JsonProcessor.new!(~r/```json(.*?)```/s)])
   |> LLMChain.run(mode: :until_success)
 
 updated_chain.last_message.processed_content
@@ -242,7 +242,7 @@ image_data_from_other_system = "image of urban art mural on underpass at 507 Kin
   %{llm: anthropic_chat_model, verbose: true}
   |> LLMChain.new!()
   |> LLMChain.apply_prompt_templates(messages, %{extra_image_info: image_data_from_other_system})
-  |> LLMChain.message_processors([JsonProcessor.new!()])
+  |> LLMChain.message_processors([JsonProcessor.new!(~r/```json(.*?)```/s)])
   |> LLMChain.run(mode: :until_success)
 
 updated_chain.last_message.processed_content
@@ -262,5 +262,3 @@ Here's what I got from it:
 ```
 
 We would want to run multiple tests on a small sampling of images and tweak our prompt until we are happy with the result. Then, we can process the full batch and save our work as a template for future projects as well.
-
-
diff --git a/test/chat_models/chat_google_ai_test.exs b/test/chat_models/chat_google_ai_test.exs
index 7e882f3..b99c817 100644
--- a/test/chat_models/chat_google_ai_test.exs
+++ b/test/chat_models/chat_google_ai_test.exs
@@ -155,6 +155,47 @@
            } = tool_result
   end
 
+  test "generate a map containing a text and an image part (bug #209)", %{google_ai: google_ai} do
+    messages = [
+      %LangChain.Message{
+        content:
+          "You are an expert at providing an image description for assistive technology and SEO benefits.",
+        role: :system
+      },
+      %LangChain.Message{
+        content: [
+          %LangChain.Message.ContentPart{
+            type: :text,
+            content: "This is the text."
+          },
+          %LangChain.Message.ContentPart{
+            type: :image,
+            content: "/9j/4AAQSkz",
+            options: [media: :jpg, detail: "low"]
+          }
+        ],
+        role: :user
+      }
+    ]
+
+    data = ChatGoogleAI.for_api(google_ai, messages, [])
+    assert %{"contents" => [msg1]} = data
+
+    assert %{
+             "parts" => [
+               %{
+                 "text" => "This is the text."
+               },
+               %{
+                 "inline_data" => %{
+                   "mime_type" => "image/jpeg",
+                   "data" => "/9j/4AAQSkz"
+                 }
+               }
+             ]
+           } = msg1
+  end
+
   test "translates a Message with function results to the expected structure" do
     expected =
       %{
@@ -402,7 +443,9 @@
       ]
     }
 
-    assert [{:error, %LangChainError{} = error}] = ChatGoogleAI.do_process_response(model, response)
+    assert [{:error, %LangChainError{} = error}] =
+             ChatGoogleAI.do_process_response(model, response)
+
     assert error.type == "changeset"
     assert error.message == "role: is invalid"
   end
@@ -483,7 +526,9 @@
       }
     }
 
-    assert {:error, %LangChainError{} = error} = ChatGoogleAI.do_process_response(model, response)
+    assert {:error, %LangChainError{} = error} =
+             ChatGoogleAI.do_process_response(model, response)
+
     assert error.type == nil
     assert error.message == "Invalid request"
   end
@@ -491,14 +536,19 @@
   test "handles Jason.DecodeError", %{model: model} do
     response = {:error, %Jason.DecodeError{}}
 
-    assert {:error, %LangChainError{} = error} = ChatGoogleAI.do_process_response(model, response)
+    assert {:error, %LangChainError{} = error} =
+             ChatGoogleAI.do_process_response(model, response)
+
     assert error.type == "invalid_json"
     assert "Received invalid JSON:" <> _ = error.message
   end
 
   test "handles unexpected response with error", %{model: model} do
     response = %{}
-    assert {:error, %LangChainError{} = error} = ChatGoogleAI.do_process_response(model, response)
+
+    assert {:error, %LangChainError{} = error} =
+             ChatGoogleAI.do_process_response(model, response)
+
     assert error.type == "unexpected_response"
     assert error.message == "Unexpected response"
   end
@@ -766,4 +816,32 @@
       assert message.role == :assistant
     end
   end
+
+  @tag live_call: true, live_google_ai: true
+  test "image classification with Google AI model" do
+    alias LangChain.Chains.LLMChain
+    alias LangChain.Message
+    alias LangChain.Message.ContentPart
+    alias LangChain.Utils.ChainResult
+
+    model = ChatGoogleAI.new!(%{temperature: 0, stream: false, model: "gemini-1.5-flash"})
+
+    image_data =
+      File.read!("test/support/images/barn_owl.jpg")
+      |> Base.encode64()
+
+    {:ok, updated_chain} =
+      %{llm: model, verbose: false, stream: false}
+      |> LLMChain.new!()
+      |> LLMChain.add_message(
+        Message.new_user!([
+          ContentPart.text!("Please describe the image."),
+          ContentPart.image!(image_data, media: :jpg)
+        ])
+      )
+      |> LLMChain.run()
+
+    {:ok, string} = ChainResult.to_string(updated_chain)
+    assert string =~ "owl"
+  end
 end

From 064f7160294f56e5abefec60d45204b2bdf78b05 Mon Sep 17 00:00:00 2001
From: Victor
Date: Thu, 12 Dec 2024 16:18:06 -0800
Subject: [PATCH 2/4] Fix specs and examples (#211)

* Update documentation to use the LLMChain.run() return type

* Fix specs

* More spec fixes

* More spec fixes

* More spec fixes

retry_count is supposed to be a non-negative integer, not a function.
Since the function raises sometimes, added no_return() as well

* Fix typo

* Fix another typo

* Update README.md

Clarified the description for the list of models
---
 README.md                         | 16 +++++++++-------
 lib/chains/llm_chain.ex           |  2 +-
 lib/chains/routing_chain.ex       |  2 +-
 lib/chat_models/chat_anthropic.ex | 15 +++++++++------
 lib/langchain_error.ex            |  4 ++--
 lib/prompt_template.ex            |  2 +-
 lib/utils/chain_result.ex         |  3 ++-
 lib/utils/chat_templates.ex       |  2 +-
 8 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/README.md b/README.md
index dc07c05..6c5e038 100644
--- a/README.md
+++ b/README.md
@@ -3,11 +3,12 @@
 Elixir LangChain enables Elixir applications to integrate AI services and self-hosted models into an application.
 
 Currently supported AI services:
+
 - OpenAI ChatGPT
 - OpenAI DALL-e 2 - image generation
 - Anthropic Claude
-- Google AI - https://generativelanguage.googleapis.com
-- Google Vertex AI - Gemini
+- Google Gemini
+- Google Vertex AI (Google's enterprise offering)
 - Ollama
 - Mistral
 - Bumblebee self-hosted models - including Llama, Mistral and Zephyr
@@ -114,6 +115,7 @@ fly secrets set ANTHROPIC_API_KEY=MyAnthropicApiKey
 ```
 
 A list of models to use:
+
 - [Anthropic Claude models](https://docs.anthropic.com/en/docs/about-claude/models)
 - [OpenAI models](https://platform.openai.com/docs/models)
 - [Gemini AI models](https://ai.google.dev/gemini-api/docs/models/gemini)
@@ -139,6 +141,7 @@ alias LangChain.Function
 alias LangChain.Message
 alias LangChain.Chains.LLMChain
 alias LangChain.ChatModels.ChatOpenAI
+alias LangChain.Utils.ChainResult
 
 # map of data we want to be passed as `context` to the function when
 # executed.
@@ -171,7 +174,7 @@ custom_fn =
   })
 
 # create and run the chain
-{:ok, updated_chain, %Message{} = message} =
+{:ok, updated_chain} =
   LLMChain.new!(%{
     llm: ChatOpenAI.new!(),
     custom_context: custom_context,
@@ -182,8 +185,8 @@ custom_fn =
   |> LLMChain.run(mode: :while_needs_response)
 
 # print the LLM's answer
-IO.puts(message.content)
+IO.puts(ChainResult.to_string!(updated_chain))
 #=> "The hairbrush is located in the drawer."
 ```
 
 ### Alternative OpenAI compatible APIs
@@ -193,7 +196,7 @@ There are several services or self-hosted applications that provide an OpenAI co
 For example, if a locally running service provided that feature, the following code could connect to the service:
 
 ```elixir
-{:ok, updated_chain, %Message{} = message} =
+{:ok, updated_chain} =
   LLMChain.new!(%{
     llm: ChatOpenAI.new!(%{endpoint: "http://localhost:1234/v1/chat/completions"}),
   })
@@ -243,4 +246,3 @@ Executing a specific test, whether it is a `live_call` or not, will execute it c
 
 When doing local development on the `LangChain` library itself, rename the `.envrc_template` to `.envrc` and populate it with your private API values. This is only used when running live test when explicitly requested. Use a tool like [Direnv](https://direnv.net/) or [Dotenv](https://github.com/motdotla/dotenv) to load the API values into the ENV when using the library locally.
- diff --git a/lib/chains/llm_chain.ex b/lib/chains/llm_chain.ex index b0fd5a9..c5ac9b0 100644 --- a/lib/chains/llm_chain.ex +++ b/lib/chains/llm_chain.ex @@ -289,7 +289,7 @@ defmodule LangChain.Chains.LLMChain do Run the chain on the LLM using messages and any registered functions. This formats the request for a ChatLLMChain where messages are passed to the API. - When successful, it returns `{:ok, updated_chain, message_or_messages}` + When successful, it returns `{:ok, updated_chain}` ## Options diff --git a/lib/chains/routing_chain.ex b/lib/chains/routing_chain.ex index 4b97abb..d5e13b0 100644 --- a/lib/chains/routing_chain.ex +++ b/lib/chains/routing_chain.ex @@ -106,7 +106,7 @@ defmodule LangChain.Chains.RoutingChain do route. """ @spec run(t(), Keyword.t()) :: - {:ok, LLMChain.t(), Message.t() | [Message.t()]} | {:error, LangChainError.t()} + {:ok, LLMChain.t()} | {:error, LLMChain.t(), LangChainError.t()} def run(%RoutingChain{} = chain, opts \\ []) do default_name = chain.default_route.name diff --git a/lib/chat_models/chat_anthropic.ex b/lib/chat_models/chat_anthropic.ex index 3f91907..17bfb49 100644 --- a/lib/chat_models/chat_anthropic.ex +++ b/lib/chat_models/chat_anthropic.ex @@ -334,8 +334,8 @@ defmodule LangChain.ChatModels.ChatAnthropic do # # Retries the request up to 3 times on transient errors with a 1 second delay @doc false - @spec do_api_request(t(), [Message.t()], ChatModel.tools(), (any() -> any())) :: - list() | struct() | {:error, LangChainError.t()} + @spec do_api_request(t(), [Message.t()], ChatModel.tools(), non_neg_integer()) :: + list() | struct() | {:error, LangChainError.t()} | no_return() def do_api_request(anthropic, messages, tools, retry_count \\ 3) def do_api_request(_anthropic, _messages, _functions, 0) do @@ -435,13 +435,14 @@ defmodule LangChain.ChatModels.ChatAnthropic do data - # The error tuple was successfully received from the API. Unwrap it and - # return it as an error. + # The error tuple was successfully received from the API. Unwrap it and + # return it as an error. {:ok, {:error, %LangChainError{} = error}} -> {:error, error} {:error, %Req.TransportError{reason: :timeout} = err} -> - {:error, LangChainError.exception(type: "timeout", message: "Request timed out", original: err)} + {:error, + LangChainError.exception(type: "timeout", message: "Request timed out", original: err)} {:error, %Req.TransportError{reason: :closed}} -> # Force a retry by making a recursive call decrementing the counter @@ -623,7 +624,9 @@ defmodule LangChain.ChatModels.ChatAnthropic do LangChainError.exception(type: "invalid_json", message: error_message, original: response)} end - def do_process_response(%ChatAnthropic{bedrock: %BedrockConfig{}}, %{"message" => "Too many requests" <> _rest = message}) do + def do_process_response(%ChatAnthropic{bedrock: %BedrockConfig{}}, %{ + "message" => "Too many requests" <> _rest = message + }) do # the error isn't wrapped in an error JSON object. tsk, tsk {:error, LangChainError.exception(type: "too_many_requests", message: message)} end diff --git a/lib/langchain_error.ex b/lib/langchain_error.ex index 07d125f..d72a41a 100644 --- a/lib/langchain_error.ex +++ b/lib/langchain_error.ex @@ -30,7 +30,7 @@ defmodule LangChain.LangChainError do Create the exception using either a message or a changeset who's errors are converted to a message. 
""" - @spec exception(message :: String.t() | Ecto.Changeset.t()) :: t() | no_return() + @spec exception(message :: String.t() | Ecto.Changeset.t() | keyword()) :: t() | no_return() def exception(message) when is_binary(message) do %LangChainError{message: message} end @@ -44,7 +44,7 @@ defmodule LangChain.LangChainError do %LangChainError{ message: Keyword.fetch!(opts, :message), type: Keyword.get(opts, :type), - original: Keyword.get(opts, :original), + original: Keyword.get(opts, :original) } end end diff --git a/lib/prompt_template.ex b/lib/prompt_template.ex index b74a8dd..a03ccb1 100644 --- a/lib/prompt_template.ex +++ b/lib/prompt_template.ex @@ -323,7 +323,7 @@ defmodule LangChain.PromptTemplate do content. Raises an exception if invalid. """ @spec to_content_part!(t(), input :: %{atom() => any()}) :: - {:ok, Message.t()} | {:error, Ecto.Changeset.t()} + ContentPart.t() | no_return() def to_content_part!(%PromptTemplate{} = template, %{} = inputs \\ %{}) do content = PromptTemplate.format(template, inputs) ContentPart.new!(%{type: :text, content: content}) diff --git a/lib/utils/chain_result.ex b/lib/utils/chain_result.ex index c2ace2e..4eb1cc9 100644 --- a/lib/utils/chain_result.ex +++ b/lib/utils/chain_result.ex @@ -80,7 +80,8 @@ defmodule LangChain.Utils.ChainResult do @doc """ Write the result to the given map as the value of the given key. """ - @spec to_map(LLMChain.t(), map(), any()) :: {:ok, map()} | {:error, String.t()} + @spec to_map(LLMChain.t(), map(), any()) :: + {:ok, map()} | {:error, LLMChain.t(), LangChainError.t()} def to_map(%LLMChain{} = chain, map, key) do case ChainResult.to_string(chain) do {:ok, value} -> diff --git a/lib/utils/chat_templates.ex b/lib/utils/chat_templates.ex index beb05a3..77352db 100644 --- a/lib/utils/chat_templates.ex +++ b/lib/utils/chat_templates.ex @@ -108,7 +108,7 @@ defmodule LangChain.Utils.ChatTemplates do - Alternates message roles between: user, assistant, user, assistant, etc. 
""" @spec prep_and_validate_messages([Message.t()]) :: - {Message.t(), Message.t(), [Message.t()]} | no_return() + {Message.t() | nil, Message.t(), [Message.t()]} | no_return() def prep_and_validate_messages(messages) do {system, first_user, rest} = case messages do From 38e17e0e37309e1efe9cb3c2c448cd165c1e2614 Mon Sep 17 00:00:00 2001 From: Mark Ericksen Date: Sat, 14 Dec 2024 16:15:37 -0700 Subject: [PATCH 3/4] added documentation for ChatOpenAI use on Azure --- README.md | 2 ++ lib/chat_models/chat_open_ai.ex | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/README.md b/README.md index 6c5e038..d91bb05 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,9 @@ fly secrets set ANTHROPIC_API_KEY=MyAnthropicApiKey A list of models to use: - [Anthropic Claude models](https://docs.anthropic.com/en/docs/about-claude/models) +- [Anthropic models on AWS Bedrock](https://docs.anthropic.com/en/api/claude-on-amazon-bedrock#accessing-bedrock) - [OpenAI models](https://platform.openai.com/docs/models) +- [OpenAI models on Azure](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models) - [Gemini AI models](https://ai.google.dev/gemini-api/docs/models/gemini) ## Usage diff --git a/lib/chat_models/chat_open_ai.ex b/lib/chat_models/chat_open_ai.ex index 9fec7bb..227f280 100644 --- a/lib/chat_models/chat_open_ai.ex +++ b/lib/chat_models/chat_open_ai.ex @@ -82,6 +82,32 @@ defmodule LangChain.ChatModels.ChatOpenAI do tool_choice: %{"type" => "function", "function" => %{"name" => "get_weather"}} }) + ## Azure OpenAI Support + + To use `ChatOpenAI` with Microsoft's Azure hosted OpenAI models, the `endpoint` must be overridden and the API key needs to be provided in some way. The [MS Quickstart guide for REST access](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cjavascript-keyless%2Ctypescript-keyless%2Cpython-new&pivots=rest-api) may be helpful. + + In order to use it, you must have an Azure account and from the console, a model must be deployed for your account. Use the Azure AI Foundry and Azure OpenAI Service to deploy the model you want to use. The entire URL is used as the `endpoint` and the provided `key` is used as the `api_key`. + + The following is an example of setting up `ChatOpenAI` for use with an Azure hosted model. + + endpoint = System.fetch_env!("AZURE_OPENAI_ENDPOINT") + api_key = System.fetch_env!("AZURE_OPENAI_KEY") + + llm = + ChatOpenAI.new!(%{ + endpoint: endpoint, + api_key: api_key, + seed: 0, + temperature: 1, + stream: false + }) + + The URL itself specifies the model to use and the `model` attribute is disregarded. 
+  A fake example URL for the endpoint value:
+
+  `https://some-subdomain.cognitiveservices.azure.com/openai/deployments/gpt-4o-mini/chat/completions?api-version=2024-08-01-preview`
+
   """
   use Ecto.Schema
   require Logger

From 039ae65a1e8869f033d1af69e4c061db2ebab44c Mon Sep 17 00:00:00 2001
From: Mark Ericksen
Date: Sat, 14 Dec 2024 17:12:48 -0700
Subject: [PATCH 4/4] Azure test for ChatOpenAI usage

---
 .envrc_template                        |  4 +++-
 test/chat_models/chat_open_ai_test.exs | 31 ++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/.envrc_template b/.envrc_template
index 44b4605..de05e69 100644
--- a/.envrc_template
+++ b/.envrc_template
@@ -3,4 +3,6 @@ export OPENAI_ORG_ID="YOUR_OPENAI_ORG_ID"
 export ANTHROPIC_API_KEY="YOUR_ANTHROPIC_API_KEY"
 export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
 export AWS_ACCESS_KEY_ID="YOUR_AWS_ACCESS_KEY_ID"
-export AWS_SECRET_ACCESS_KEY="YOUR_AWS_SECRET_ACCESS_KEY"
\ No newline at end of file
+export AWS_SECRET_ACCESS_KEY="YOUR_AWS_SECRET_ACCESS_KEY"
+export AZURE_OPENAI_ENDPOINT="YOUR_AZURE_MODEL_ENDPOINT"
+export AZURE_OPENAI_KEY="YOUR_AZURE_ENDPOINT_KEY"
\ No newline at end of file
diff --git a/test/chat_models/chat_open_ai_test.exs b/test/chat_models/chat_open_ai_test.exs
index acd5a22..f600a85 100644
--- a/test/chat_models/chat_open_ai_test.exs
+++ b/test/chat_models/chat_open_ai_test.exs
@@ -917,6 +917,36 @@
       assert reason.type == nil
       assert reason.message =~ "maximum context length"
     end
+
+    @tag live_call: true, live_azure: true
+    test "supports Azure hosted OpenAI models" do
+      # https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cjavascript-keyless%2Ctypescript-keyless%2Cpython-new&pivots=rest-api
+
+      endpoint = System.fetch_env!("AZURE_OPENAI_ENDPOINT")
+      api_key = System.fetch_env!("AZURE_OPENAI_KEY")
+
+      {:ok, chat} =
+        ChatOpenAI.new(%{
+          endpoint: endpoint,
+          api_key: api_key,
+          seed: 0,
+          temperature: 1,
+          stream: false
+        })
+
+      {:ok, [message]} =
+        ChatOpenAI.call(
+          chat,
+          [
+            Message.new_user!("Return the response 'Hi'.")
+          ],
+          []
+        )
+
+      assert message.content =~ "Hi"
+      assert message.role == :assistant
+      assert message.index == 0
+    end
   end
 
   describe "do_process_response/2" do
@@ -1291,6 +1321,7 @@
       {:error, %LangChainError{} = reason} = ChatOpenAI.call(chat, [], [])
 
       assert reason.type == nil
+      assert reason.message == "Invalid 'messages': empty array. Expected an array with minimum length 1, but got an empty array instead."