Merge dev for v1.0.2 release (#908)
chriswilty authored Apr 29, 2024
2 parents d920f7a + 9b0912c · commit 40bab47
Showing 15 changed files with 441 additions and 254 deletions.
42 changes: 22 additions & 20 deletions backend/package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion backend/package.json
@@ -39,7 +39,7 @@
  "eslint-plugin-import": "^2.29.1",
  "eslint-plugin-jest": "^28.2.0",
  "prettier": "^3.2.5",
-  "supertest": "^6.3.4",
+  "supertest": "^7.0.0",
  "ts-jest": "^29.1.2",
  "tsx": "^4.7.2",
  "typescript": "^5.4.5"
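The only change to this file is the supertest major-version bump (6 → 7). The request-building API that integration tests typically use is, to my knowledge, unchanged across that bump, so existing tests should compile and run as before. A minimal sketch of that usage under Jest, with a hypothetical app import and route:

import request from 'supertest';
import app from '@src/app'; // hypothetical path to the Express app under test

// supertest wraps the app in an ephemeral HTTP server, issues a real request,
// and expect() asserts on the response status.
test('health endpoint responds', async () => {
  const response = await request(app).get('/health').expect(200); // hypothetical route
  expect(response.body).toBeDefined();
});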
6 changes: 3 additions & 3 deletions backend/src/controller/chatController.ts
@@ -24,7 +24,7 @@ import {
import { DEFENCE_ID, Defence, QaLlmDefence } from '@src/models/defence';
import { EmailInfo } from '@src/models/email';
import { LEVEL_NAMES } from '@src/models/level';
-import { chatGptSendMessage } from '@src/openai';
+import { chatModelSendMessage } from '@src/openai';
import {
  pushMessageToHistory,
  setSystemRoleInChatHistory,
@@ -104,7 +104,7 @@ async function handleChatWithoutDefenceDetection(
    chatHistory
  );

-  const openAiReply = await chatGptSendMessage(
+  const openAiReply = await chatModelSendMessage(
    updatedChatHistory,
    chatModel,
    currentLevel
@@ -151,7 +151,7 @@ async function handleChatWithDefenceDetection(
    (defence) => defence.id === DEFENCE_ID.QA_LLM
  ) as QaLlmDefence | undefined;

-  const openAiReplyPromise = chatGptSendMessage(
+  const openAiReplyPromise = chatModelSendMessage(
    chatHistoryWithNewUserMessages,
    chatModel,
    currentLevel,
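The substantive change in this controller is the rename of chatGptSendMessage to chatModelSendMessage, presumably so the name no longer implies a GPT-specific model. The declaration itself lives in backend/src/openai.ts, which is not part of this diff; the sketch below is a hypothetical reconstruction of its shape, inferred only from the call sites above and the ChatModelReply type introduced further down in backend/src/models/chat.ts. The second call's argument list is truncated by the diff, so there may be additional parameters.

import type { ChatModel, ChatModelReply } from '@src/models/chat';
import type { ChatMessage } from '@src/models/chatMessage';
import type { LEVEL_NAMES } from '@src/models/level';

// Hypothetical shape only; the real declaration is in backend/src/openai.ts.
declare function chatModelSendMessage(
  chatHistory: ChatMessage[],
  chatModel: ChatModel,
  currentLevel: LEVEL_NAMES
): Promise<ChatModelReply>;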
33 changes: 15 additions & 18 deletions backend/src/document.ts
@@ -1,7 +1,9 @@
import { OpenAIEmbeddings } from '@langchain/openai';
import { CSVLoader } from 'langchain/document_loaders/fs/csv';
-import { DirectoryLoader } from 'langchain/document_loaders/fs/directory';
-import { PDFLoader } from 'langchain/document_loaders/fs/pdf';
+import {
+  DirectoryLoader,
+  UnknownHandling,
+} from 'langchain/document_loaders/fs/directory';
import { TextLoader } from 'langchain/document_loaders/fs/text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';
@@ -14,25 +16,20 @@ import { LEVEL_NAMES } from './models/level';
async function getDocuments(filePath: string) {
  console.debug(`Loading documents from: ${filePath}`);

-  const loader: DirectoryLoader = new DirectoryLoader(filePath, {
-    '.pdf': (path: string) => new PDFLoader(path),
-    '.txt': (path: string) => new TextLoader(path),
-    '.csv': (path: string) => new CSVLoader(path),
-  });
+  const loader: DirectoryLoader = new DirectoryLoader(
+    filePath,
+    {
+      '.txt': (path: string) => new TextLoader(path),
+      // CSV loader returns one document per row
+      '.csv': (path: string) => new CSVLoader(path),
+    },
+    true,
+    UnknownHandling.Warn
+  );
  const docs = await loader.load();
  console.debug(`${docs.length} documents found`);

-  // split the documents into chunks
-  const textSplitter = new RecursiveCharacterTextSplitter({
-    chunkSize: 1000,
-    chunkOverlap: 0,
-  });
-
-  // need to disable the linting rule here
-  // as it's possible for the splitDocuments method to return undefined
-  // despite what the return type says
-  // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
-  return (await textSplitter.splitDocuments(docs)) ?? [];
+  return new RecursiveCharacterTextSplitter().splitDocuments(docs);
}

function getFilepath(target: LEVEL_NAMES | 'common') {
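Two behavioural points in this hunk are easy to miss. First, the '.pdf' loader mapping is dropped, so PDF files are no longer loaded at all; second, the loader now passes DirectoryLoader's two optional arguments, recursion and unknown-extension handling. UnknownHandling is langchain's enum for files whose extension has no registered loader: Ignore skips them silently, Warn logs and skips, and Error throws. A minimal sketch of the new construction, using a hypothetical folder path:

import {
  DirectoryLoader,
  UnknownHandling,
} from 'langchain/document_loaders/fs/directory';
import { TextLoader } from 'langchain/document_loaders/fs/text';

async function loadTextDocuments() {
  const loader = new DirectoryLoader(
    './resources/documents', // hypothetical path, for illustration only
    { '.txt': (path) => new TextLoader(path) },
    true, // recursive: descend into subdirectories
    UnknownHandling.Warn // e.g. a leftover .pdf is logged and skipped, not loaded
  );
  return loader.load();
}

The splitter change is similar in spirit: new RecursiveCharacterTextSplitter() with no options falls back to the library defaults (a chunk size of 1000 with an overlap of 200 in current langchain releases, rather than the zero-overlap configuration removed here), and the defensive ?? [] with its lint suppression is dropped in favour of trusting the declared return type.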
8 changes: 5 additions & 3 deletions backend/src/langchain.ts
@@ -50,9 +50,11 @@ function initQAModel(level: LEVEL_NAMES, Prompt: string) {
    'QA prompt template'
  );
  console.debug(`QA chain initialised with model: ${modelName}`);
-  return RetrievalQAChain.fromLLM(model, documentVectors.asRetriever(), {
-    prompt: promptTemplate,
-  });
+  return RetrievalQAChain.fromLLM(
+    model,
+    documentVectors.asRetriever(documentVectors.memoryVectors.length),
+    { prompt: promptTemplate }
+  );
}

function initPromptEvaluationModel(configPromptEvaluationPrompt: string) {
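The retriever change deserves a note. asRetriever() with no argument retrieves langchain's default top-k matches (k = 4 in current releases — an assumption worth checking against the installed version), whereas passing documentVectors.memoryVectors.length sets k to the number of stored vectors, so the QA chain now sees every stored chunk ranked by similarity rather than only the closest few. A minimal sketch of the difference, assuming an OpenAI API key in the environment and an enclosing async context:

import { OpenAIEmbeddings } from '@langchain/openai';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';

const store = await MemoryVectorStore.fromTexts(
  ['chunk one', 'chunk two', 'chunk three'], // toy corpus, for illustration only
  [{}, {}, {}],
  new OpenAIEmbeddings()
);

// Default retriever: top-k nearest chunks only.
const topK = store.asRetriever();

// As in this commit: k equals the store size, so every chunk can be returned.
const all = store.asRetriever(store.memoryVectors.length);
const docs = await all.getRelevantDocuments('what do the documents say?');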
20 changes: 3 additions & 17 deletions backend/src/models/chat.ts
@@ -53,23 +53,11 @@ interface FunctionCallResponse {
}

interface ToolCallResponse {
  functionCallReply?: FunctionCallResponse;
-  chatResponse?: ChatResponse;
  chatHistory: ChatMessage[];
  sentEmails: EmailInfo[];
}

-interface ChatMalicious {
-  isMalicious: boolean;
-  reason: string;
-}
-
-type ChatResponse = {
-  completion: ChatCompletionMessageParam | null;
-  openAIErrorMessage: string | null;
-};
-
-type ChatGptReply = {
-  chatHistory: ChatMessage[];
+type ChatModelReply = {
  completion: ChatCompletionAssistantMessageParam | null;
  openAIErrorMessage: string | null;
};
@@ -116,11 +104,9 @@ const defaultChatModel: ChatModel = {

export type {
  DefenceReport,
-  ChatGptReply,
-  ChatMalicious,
  ChatModel,
  ChatModelConfigurations,
-  ChatResponse,
+  ChatModelReply,
  LevelHandlerResponse,
  ChatHttpResponse,
  TransformedChatMessage,
3 changes: 2 additions & 1 deletion backend/src/models/chatMessage.ts
@@ -62,8 +62,9 @@ type ChatMessage = ChatInfoMessage | ChatCompletionMessage;

export type {
  ChatMessage,
+  ChatSystemMessage,
  ChatCompletionMessage,
  ChatInfoMessage,
-  ChatSystemMessage,
  CHAT_INFO_MESSAGE_TYPES,
};