From a815053c9e58738e205dfc8ace088574eb399ed5 Mon Sep 17 00:00:00 2001
From: Michael Dawson
Date: Sun, 27 Oct 2024 00:50:42 -0400
Subject: [PATCH] tests: support local models (#103)

Add support for using a local model for testing.

Signed-off-by: Michael Dawson
---
 CONTRIBUTING.md           |  3 ++-
 tests/utils/llmFactory.ts | 28 ++++++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index aeddeff2..79f85aad 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -61,12 +61,13 @@ yarn install
 5. **Setup environmental variables:** To run E2E Tests, you should set the following variables in your `.env` file in the repository’s root.
 
 ```bash
-# At least one provider API key must be defined!
+# At least one provider API key or an OLLAMA_HOST must be defined!
 GENAI_API_KEY=""
 OPENAI_API_KEY=""
 GROQ_API_KEY=""
 WATSONX_API_KEY=""
 WATSONX_PROJECT_ID=""
+OLLAMA_HOST=""
 WATSONX_SPACE_ID="" # optional
 WATSONX_DEPLOYMENT_ID="" # optional
 ```
diff --git a/tests/utils/llmFactory.ts b/tests/utils/llmFactory.ts
index 66904836..13dfd76f 100644
--- a/tests/utils/llmFactory.ts
+++ b/tests/utils/llmFactory.ts
@@ -20,6 +20,9 @@ import { BAMChatLLM } from "@/adapters/bam/chat.js";
 import { OpenAIChatLLM } from "@/adapters/openai/chat.js";
 import { WatsonXChatLLM } from "@/adapters/watsonx/chat.js";
 import { GroqChatLLM } from "@/adapters/groq/chat.js";
+import { OllamaChatLLM } from "@/adapters/ollama/chat.js";
+import { Ollama } from "ollama";
+import { Agent, Dispatcher } from "undici";
 
 export function createChatLLM(): ChatLLM<ChatLLMOutput> {
   if (process.env.GENAI_API_KEY) {
@@ -35,6 +38,31 @@
       modelId: `llama-3.1-70b-versatile`,
       parameters: { temperature: 0 },
     });
+  } else if (process.env.OLLAMA_HOST) {
+    // the undici definition of RequestInit does not extend the default
+    // fetch RequestInit, so we can't use its type directly. Define
+    // an interface that adds the field we need to the default fetch
+    // interface so that we can make TypeScript accept it.
+    interface UndiciRequestInit extends RequestInit {
+      dispatcher: Dispatcher;
+    }
+    return new OllamaChatLLM({
+      modelId: process.env.OLLAMA_MODEL ?? "llama3.1:8b",
+      parameters: {
+        temperature: 0,
+      },
+      client: new Ollama({
+        host: process.env.OLLAMA_HOST,
+        fetch: (input, init?) => {
+          const someInit = init || {};
+          const requestInit: UndiciRequestInit = {
+            ...someInit,
+            dispatcher: new Agent({ headersTimeout: 2700000 }),
+          };
+          return fetch(input, requestInit);
+        },
+      }),
+    });
   } else {
     throw new Error("No API key for any LLM provider has been provided. Cannot run test case.");
   }
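
Note for reviewers: the `headersTimeout` of 2700000 ms equals 45 minutes; undici's default is far shorter, and a local model can easily need longer than that default before it returns response headers. Below is a minimal standalone sketch of the same wrapping pattern, for trying the idea outside the test suite. It is not part of the patch, and the host, port, model name, and prompt are illustrative assumptions.

```ts
import { Ollama } from "ollama";
import { Agent, Dispatcher } from "undici";

// Same trick as in the patch: undici's RequestInit is a distinct type,
// so widen the standard fetch RequestInit with the `dispatcher` field.
interface UndiciRequestInit extends RequestInit {
  dispatcher: Dispatcher;
}

// 45-minute header timeout; a local model may need far longer than
// undici's default before it sends the first response bytes.
const agent = new Agent({ headersTimeout: 2_700_000 });

const client = new Ollama({
  host: process.env.OLLAMA_HOST ?? "http://127.0.0.1:11434", // assumed default Ollama port
  fetch: (input, init?) => {
    const requestInit: UndiciRequestInit = { ...(init ?? {}), dispatcher: agent };
    return fetch(input, requestInit);
  },
});

// Hypothetical smoke test against whatever model is pulled locally.
const response = await client.chat({
  model: process.env.OLLAMA_MODEL ?? "llama3.1:8b",
  messages: [{ role: "user", content: "Reply with the single word: ok" }],
});
console.log(response.message.content);
```

Run under Node.js 18+ as an ES module (the sketch uses top-level await); it should print the model's reply, and if the header timeout is ever exceeded, undici fails the request with `UND_ERR_HEADERS_TIMEOUT` rather than hanging indefinitely.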