From b0d751bfda937ec3ecd3a1e6145d7b517475cdaa Mon Sep 17 00:00:00 2001 From: Mini256 Date: Wed, 27 Nov 2024 12:33:04 +0800 Subject: [PATCH] feat: support kb document manage --- .../app/api/admin_routes/document/routes.py | 4 +- .../knowledge_base/document/__init__.py | 0 .../{ => knowledge_base}/document/models.py | 8 +- .../knowledge_base/document/routes.py | 153 +++++++++++++ .../api/admin_routes/knowledge_base/routes.py | 123 ++--------- backend/app/exceptions.py | 13 ++ backend/app/rag/knowledge_base/retrieve.py | 207 ++++++++++++++++++ backend/app/repositories/llm.py | 35 ++- backend/app/repositories/reranker.py | 37 ++++ 9 files changed, 462 insertions(+), 118 deletions(-) create mode 100644 backend/app/api/admin_routes/knowledge_base/document/__init__.py rename backend/app/api/admin_routes/{ => knowledge_base}/document/models.py (93%) create mode 100644 backend/app/api/admin_routes/knowledge_base/document/routes.py create mode 100644 backend/app/rag/knowledge_base/retrieve.py create mode 100644 backend/app/repositories/reranker.py diff --git a/backend/app/api/admin_routes/document/routes.py b/backend/app/api/admin_routes/document/routes.py index 67e78da6..bd7619c7 100644 --- a/backend/app/api/admin_routes/document/routes.py +++ b/backend/app/api/admin_routes/document/routes.py @@ -3,12 +3,10 @@ from fastapi import APIRouter, Depends, Query from fastapi_pagination import Params, Page -from app.api.admin_routes.document.models import DocumentItem +from app.api.admin_routes.knowledge_base.document.models import DocumentFilters, DocumentItem from app.api.deps import SessionDep, CurrentSuperuserDep from app.repositories import document_repo -from app.repositories.document import DocumentFilters - router = APIRouter() diff --git a/backend/app/api/admin_routes/knowledge_base/document/__init__.py b/backend/app/api/admin_routes/knowledge_base/document/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/backend/app/api/admin_routes/document/models.py b/backend/app/api/admin_routes/knowledge_base/document/models.py similarity index 93% rename from backend/app/api/admin_routes/document/models.py rename to backend/app/api/admin_routes/knowledge_base/document/models.py index ff069131..cf34d1a7 100644 --- a/backend/app/api/admin_routes/document/models.py +++ b/backend/app/api/admin_routes/knowledge_base/document/models.py @@ -43,4 +43,10 @@ class DocumentItem(BaseModel): knowledge_base: KnowledgeBaseDescriptor | None last_modified_at: datetime created_at: datetime - updated_at: datetime \ No newline at end of file + updated_at: datetime + + +class KBDocumentUpload(BaseModel): + file_id: int + file_name: str + diff --git a/backend/app/api/admin_routes/knowledge_base/document/routes.py b/backend/app/api/admin_routes/knowledge_base/document/routes.py new file mode 100644 index 00000000..a7d94f21 --- /dev/null +++ b/backend/app/api/admin_routes/knowledge_base/document/routes.py @@ -0,0 +1,153 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends, Query +from fastapi_pagination import Params, Page + +from app.api.admin_routes.knowledge_base.data_sources.models import KBDataSource +from app.api.admin_routes.knowledge_base.document.models import KBDocumentUpload, DocumentFilters, DocumentItem +from app.api.admin_routes.knowledge_base.models import ChunkItem +from app.api.admin_routes.knowledge_base.routes import logger +from app.api.deps import SessionDep, CurrentSuperuserDep +from app.exceptions import ( + InternalServerError, + KBDataSourceNotFoundError, + KnowledgeBaseNotFoundError +) +from app.models import DataSource +from app.models.chunk import get_kb_chunk_model +from app.repositories import knowledge_base_repo, document_repo +from app.repositories.chunk import ChunkRepo +from app.tasks import build_index_for_document, build_kg_index_for_chunk +from app.tasks.knowledge_base import ( + import_documents_from_kb_datasource, + 
purge_kb_datasource_related_resources +) + + +router = APIRouter() +logger = logging.getLogger(__name__) + + +@router.post("/admin/knowledge_bases/{kb_id}/documents/upload") +def upload_kb_document( + session: SessionDep, + user: CurrentSuperuserDep, + kb_id: int, + upload: KBDocumentUpload +) -> KBDataSource: + try: + kb = knowledge_base_repo.must_get(session, kb_id) + new_data_source = DataSource( + name=upload.file_name, + description="", + data_source_type="file", + config=[{"file_id": upload.file_id, "file_name": upload.file_name}], + ) + new_data_source = knowledge_base_repo.add_kb_datasource(session, kb, new_data_source) + + import_documents_from_kb_datasource.delay(kb_id, new_data_source.id) + + return new_data_source + except KnowledgeBaseNotFoundError as e: + raise e + except Exception as e: + logger.error(f"Failed to create data source for knowledge base #{kb_id}: {e}", exc_info=e) + raise InternalServerError() + + +@router.get("/admin/knowledge_bases/{kb_id}/documents") +def list_kb_documents( + session: SessionDep, + user: CurrentSuperuserDep, + kb_id: int, + filters: Annotated[DocumentFilters, Query()], + params: Params = Depends(), +) -> Page[DocumentItem]: + try: + kb = knowledge_base_repo.must_get(session, kb_id) + filters.knowledge_base_id = kb.id + return document_repo.paginate( + session=session, + filters=filters, + params=params, + ) + except KnowledgeBaseNotFoundError as e: + raise e + except Exception as e: + logger.exception(e) + raise InternalServerError() + + +@router.get("/admin/knowledge_bases/{kb_id}/documents/{doc_id}/chunks") +def list_kb_chunks( + session: SessionDep, + user: CurrentSuperuserDep, + kb_id: int, + doc_id: int, +) -> list[ChunkItem]: + try: + kb = knowledge_base_repo.must_get(session, kb_id) + chunk_repo = ChunkRepo(get_kb_chunk_model(kb)) + return chunk_repo.get_document_chunks(session, doc_id) + except KnowledgeBaseNotFoundError as e: + raise e + except Exception as e: + logger.exception(e) + raise InternalServerError() + + 
+@router.post("/admin/knowledge_bases/{kb_id}/documents/reindex") +def batch_reindex_kb_documents( + session: SessionDep, + user: CurrentSuperuserDep, + kb_id: int, + document_ids: list[int] +) -> dict: + try: + kb = knowledge_base_repo.must_get(session, kb_id) + chunk_repo = ChunkRepo(get_kb_chunk_model(kb)) + + for document_id in document_ids: + build_index_for_document.delay(kb.id, document_id) + + chunks = chunk_repo.get_document_chunks(session, document_id) + for chunk in chunks: + build_kg_index_for_chunk.delay(kb.id, chunk.id) + + return { + "detail": f"Triggered {len(document_ids)} documents to reindex knowledge base #{kb_id} successfully" + } + except KnowledgeBaseNotFoundError as e: + raise e + except Exception as e: + logger.exception(e) + raise InternalServerError() + + +@router.delete("/admin/knowledge_bases/{kb_id}/documents/{data_source_id}") +def remove_kb_document( + session: SessionDep, + user: CurrentSuperuserDep, + kb_id: int, + data_source_id: int, +): + try: + kb = knowledge_base_repo.must_get(session, kb_id) + data_source = kb.must_get_data_source_by_id(data_source_id) + + # Flag the data source to be deleted, it will be deleted completely by the background job. 
+ knowledge_base_repo.remove_kb_document(session, kb, data_source) + + purge_kb_datasource_related_resources.delay(kb_id, data_source_id) + + return { + "detail": "success" + } + except KnowledgeBaseNotFoundError as e: + raise e + except KBDataSourceNotFoundError as e: + raise e + except Exception as e: + logger.error(f"Failed to remove data source #{data_source_id} from knowledge base #{kb_id}: {e}", exc_info=e) + raise InternalServerError() \ No newline at end of file diff --git a/backend/app/api/admin_routes/knowledge_base/routes.py b/backend/app/api/admin_routes/knowledge_base/routes.py index 0a1212be..22734138 100644 --- a/backend/app/api/admin_routes/knowledge_base/routes.py +++ b/backend/app/api/admin_routes/knowledge_base/routes.py @@ -1,28 +1,22 @@ import logging -from typing import Annotated -from fastapi import APIRouter, Depends, logger, Query +from fastapi import APIRouter, Depends from fastapi_pagination import Params, Page -from app.models.chunk import get_kb_chunk_model from app.rag.knowledge_base.index_store import init_kb_tidb_vector_store, init_kb_tidb_graph_store -from app.repositories.chunk import ChunkRepo -from app.repositories.embedding_model import embedding_model_repo -from app.repositories.llm import get_default_db_llm - +from app.repositories.embedding_model import embed_model_repo +from app.repositories.llm import llm_repo from .models import ( KnowledgeBaseDetail, KnowledgeBaseItem, - KnowledgeBaseCreate, ChunkItem, KnowledgeBaseUpdate, VectorIndexError, KGIndexError + KnowledgeBaseCreate, KnowledgeBaseUpdate, VectorIndexError, KGIndexError ) from app.api.deps import SessionDep, CurrentSuperuserDep from app.exceptions import ( InternalServerError, KnowledgeBaseNotFoundError, - KBNoVectorIndexConfiguredError, - KBNoLLMConfiguredError, - KBNoEmbedModelConfiguredError + KBNoVectorIndexConfiguredError ) from app.models import ( KnowledgeBase, @@ -32,10 +26,12 @@ build_kg_index_for_chunk, build_index_for_document, ) -from app.repositories 
import knowledge_base_repo, data_source_repo, document_repo -from app.tasks.knowledge_base import import_documents_for_knowledge_base, purge_knowledge_base_related_resources, \ - stats_for_knowledge_base -from ..document.models import DocumentItem, DocumentFilters +from app.repositories import knowledge_base_repo, data_source_repo +from app.tasks.knowledge_base import ( + import_documents_for_knowledge_base, + stats_for_knowledge_base, + purge_knowledge_base_related_resources +) router = APIRouter() logger = logging.getLogger(__name__) @@ -52,34 +48,24 @@ def create_knowledge_base( data_source_repo.create(session, DataSource( name=data_source.name, description='', + user_id=user.id, data_source_type=data_source.data_source_type, config=data_source.config, )) for data_source in create.data_sources ] - db_llm_id = create.llm_id - if not db_llm_id: - default_llm = get_default_db_llm(session) - if default_llm: - db_llm_id = default_llm.id - else: - raise KBNoLLMConfiguredError() - - db_embed_model_id = create.embedding_model_id - if not db_embed_model_id: - default_embed_model = embedding_model_repo.get_default_model(session) - if default_embed_model: - db_embed_model_id = default_embed_model.id - else: - raise KBNoEmbedModelConfiguredError() + if not create.llm_id: + create.llm_id = llm_repo.must_get_default_llm(session).id + if not create.embedding_model_id: + create.embedding_model_id = embed_model_repo.must_get_default_model(session).id knowledge_base = KnowledgeBase( name=create.name, description=create.description, index_methods=create.index_methods, - llm_id=db_llm_id, - embedding_model_id=db_embed_model_id, + llm_id=create.llm_id, + embedding_model_id=create.embedding_model_id, data_sources=data_sources, created_by=user.id, updated_by=user.id, @@ -97,7 +83,7 @@ def create_knowledge_base( except KBNoVectorIndexConfiguredError as e: raise e except Exception as e: - logging.exception(e) + logger.exception(e) raise InternalServerError() @@ -141,7 +127,7 @@ def 
update_knowledge_base_setting( except KBNoVectorIndexConfiguredError as e: raise e except Exception as e: - logging.exception(e) + logger.exception(e) raise InternalServerError() @@ -190,75 +176,6 @@ def get_knowledge_base_index_overview( raise InternalServerError() -@router.get("/admin/knowledge_bases/{kb_id}/documents") -def list_knowledge_base_documents( - session: SessionDep, - user: CurrentSuperuserDep, - kb_id: int, - filters: Annotated[DocumentFilters, Query()], - params: Params = Depends(), -) -> Page[DocumentItem]: - try: - kb = knowledge_base_repo.must_get(session, kb_id) - filters.knowledge_base_id = kb.id - return document_repo.paginate( - session=session, - filters=filters, - params=params, - ) - except KnowledgeBaseNotFoundError as e: - raise e - except Exception as e: - logger.exception(e) - raise InternalServerError() - - -@router.get("/admin/knowledge_bases/{kb_id}/documents/{doc_id}/chunks") -def list_knowledge_base_chunks( - session: SessionDep, - user: CurrentSuperuserDep, - kb_id: int, - doc_id: int, -) -> list[ChunkItem]: - try: - kb = knowledge_base_repo.must_get(session, kb_id) - chunk_repo = ChunkRepo(get_kb_chunk_model(kb)) - return chunk_repo.get_document_chunks(session, doc_id) - except KnowledgeBaseNotFoundError as e: - raise e - except Exception as e: - logger.exception(e) - raise InternalServerError() - - -@router.post("/admin/knowledge_bases/{kb_id}/documents/reindex") -def batch_reindex_knowledge_base_documents( - session: SessionDep, - user: CurrentSuperuserDep, - kb_id: int, - document_ids: list[int] -) -> dict: - try: - kb = knowledge_base_repo.must_get(session, kb_id) - chunk_repo = ChunkRepo(get_kb_chunk_model(kb)) - - for document_id in document_ids: - build_index_for_document.delay(kb.id, document_id) - - chunks = chunk_repo.get_document_chunks(session, document_id) - for chunk in chunks: - build_kg_index_for_chunk.delay(kb.id, chunk.id) - - return { - "detail": f"Triggered {len(document_ids)} documents to reindex knowledge 
base #{kb_id} successfully" - } - except KnowledgeBaseNotFoundError as e: - raise e - except Exception as e: - logger.exception(e) - raise InternalServerError() - - @router.get("/admin/knowledge_bases/{kb_id}/vector-index-errors") def list_kb_vector_index_errors( session: SessionDep, diff --git a/backend/app/exceptions.py b/backend/app/exceptions.py index 143ce009..eef0cf0b 100644 --- a/backend/app/exceptions.py +++ b/backend/app/exceptions.py @@ -22,6 +22,19 @@ def __init__(self, knowledge_base_id: int): self.detail = f"llm #{knowledge_base_id} is not found" +class DBRerankerNotFoundError(HTTPException): + status_code = 404 + + def __init__(self, reranker_id: int): + self.detail = f"reranker #{reranker_id} is not found" + +class DefaultRerankerNotFoundError(HTTPException): + status_code = 404 + + def __init__(self): + self.detail = f"default reranker is not found" + + class KnowledgeBaseNotFoundError(HTTPException): status_code = 404 diff --git a/backend/app/rag/knowledge_base/retrieve.py b/backend/app/rag/knowledge_base/retrieve.py new file mode 100644 index 00000000..94f9ac32 --- /dev/null +++ b/backend/app/rag/knowledge_base/retrieve.py @@ -0,0 +1,207 @@ +import logging +from typing import List, Optional, Type + +from llama_index.core import VectorStoreIndex +from llama_index.core.schema import NodeWithScore +from pydantic import BaseModel +from sqlmodel import Session, select + +from app.models import ( + Document as DBDocument, + Chunk as DBChunk, + Entity as DBEntity, + Relationship as DBRelationship, +) +from app.models.chunk import get_kb_chunk_model +from app.models.entity import get_kb_entity_model +from app.rag.chat import get_prompt_by_jinja2_template +from app.rag.chat_config import get_default_embedding_model, KnowledgeGraphOption +from app.models.relationship import get_kb_relationship_model +from app.models.patch.sql_model import SQLModel +from app.rag.knowledge_base.config import get_kb_embed_model +from app.rag.knowledge_graph import 
KnowledgeGraphIndex +from app.rag.knowledge_graph.graph_store import TiDBGraphStore +from app.rag.vector_store.tidb_vector_store import TiDBVectorStore +from app.repositories.knowledge_base import knowledge_base_repo +from app.repositories.llm import llm_repo +from app.repositories.reranker import reranker_repo + +logger = logging.getLogger(__name__) + +class RetrievalQuery(BaseModel): + query: str + top_k: int = 5 + + +class RetrievalConfig(BaseModel): + knowledge_base: int + llm_id: Optional[int] = None + fast_llm_id: Optional[int] = None + reranker_id: Optional[int] = None + knowledge_graph: Optional[KnowledgeGraphOption] = None + + +class RetrievalRequest(BaseModel): + query: str + config: RetrievalConfig + + +class KBRetrieveService: + _chunk_model: Type[SQLModel] = DBChunk + _entity_model: Type[SQLModel] = DBEntity + _relationship_model: Type[SQLModel] = DBRelationship + + def __init__( + self, + db_session: Session, + config: RetrievalConfig, + ) -> None: + + if config.llm_id: + self._llm = llm_repo.must_get_llm(db_session, config.llm_id) + else: + self._llm = llm_repo.must_get_default_llm(db_session) + + if config.fast_llm_id: + self._fast_llm = llm_repo.must_get_llm(db_session, config.fast_llm_id) + else: + self._fast_llm = self._reranker = reranker_repo.must_get_reranker(db_session, config.reranker_id) if config.reranker_id else self._llm + + if config.reranker_id: + self._reranker = reranker_repo.must_get_reranker(db_session, config.reranker_id) + + _llm = self.chat_engine_config.get_llama_llm(db_session) + _fast_llm = self.chat_engine_config.get_fast_llama_llm(db_session) + _fast_dspy_lm = self.chat_engine_config.get_fast_dspy_lm(db_session) + self._reranker = self.chat_engine_config.get_reranker(db_session) + + if self.chat_engine_config.knowledge_base: + # TODO: Support multiple knowledge base retrieve. 
+ linked_knowledge_base = self.chat_engine_config.knowledge_base.linked_knowledge_base + kb = knowledge_base_repo.must_get(db_session, linked_knowledge_base.id) + self._chunk_model = get_kb_chunk_model(kb) + self._entity_model = get_kb_entity_model(kb) + self._relationship_model = get_kb_relationship_model(kb) + self._embed_model = get_kb_embed_model(self.db_session, kb) + else: + self._embed_model = get_default_embedding_model(self.db_session) + + + def retrieve(self, question: str, top_k: int = 10) -> List[DBDocument]: + try: + return self._retrieve(question, top_k) + except Exception as e: + logger.exception(e) + + def _retrieve(self, question: str, top_k: int) -> List[DBDocument]: + # 1. Retrieve entities, relations, and chunks from the knowledge graph + kg_config = self.chat_engine_config.knowledge_graph + if kg_config.enabled: + graph_store = TiDBGraphStore( + dspy_lm=_fast_dspy_lm, + session=self.db_session, + embed_model=self._embed_model, + entity_db_model=self._entity_model, + relationship_db_model=self._relationship_model, + ) + graph_index: KnowledgeGraphIndex = KnowledgeGraphIndex.from_existing( + dspy_lm=_fast_dspy_lm, + kg_store=graph_store, + ) + + if kg_config.using_intent_search: + sub_queries = graph_index.intent_analyze(question) + result = graph_index.graph_semantic_search( + sub_queries, include_meta=True + ) + + graph_knowledges = get_prompt_by_jinja2_template( + self.chat_engine_config.llm.intent_graph_knowledge, + sub_queries=result["queries"], + ) + graph_knowledges_context = graph_knowledges.template + else: + entities, relations, chunks = graph_index.retrieve_with_weight( + question, + [], + depth=kg_config.depth, + include_meta=kg_config.include_meta, + with_degree=kg_config.with_degree, + with_chunks=False, + ) + graph_knowledges = get_prompt_by_jinja2_template( + self.chat_engine_config.llm.normal_graph_knowledge, + entities=entities, + relationships=relations, + ) + graph_knowledges_context = graph_knowledges.template + else: + 
entities, relations, chunks = [], [], [] + graph_knowledges_context = "" + + # 2. Refine the user question using graph information and chat history + refined_question = _fast_llm.predict( + get_prompt_by_jinja2_template( + self.chat_engine_config.llm.condense_question_prompt, + graph_knowledges=graph_knowledges_context, + question=question, + ), + ) + + # 3. Retrieve the related chunks from the vector store + # 4. Rerank after the retrieval + # 5. Generate a response using the refined question and related chunks + text_qa_template = get_prompt_by_jinja2_template( + self.chat_engine_config.llm.text_qa_prompt, + graph_knowledges=graph_knowledges_context, + ) + refine_template = get_prompt_by_jinja2_template( + self.chat_engine_config.llm.refine_prompt, + graph_knowledges=graph_knowledges_context, + ) + vector_store = TiDBVectorStore(session=self.db_session, chunk_db_model=self._chunk_model) + vector_index = VectorStoreIndex.from_vector_store( + vector_store, + embed_model=self._embed_model, + ) + + retrieve_engine = vector_index.as_retriever( + node_postprocessors=[self._reranker], + streaming=True, + text_qa_template=text_qa_template, + refine_template=refine_template, + similarity_top_k=top_k, + ) + + node_list: List[NodeWithScore] = retrieve_engine.retrieve(refined_question) + source_documents = self._get_source_documents(node_list) + + return source_documents + + def _embedding_retrieve(self, question: str, top_k: int) -> List[NodeWithScore]: + vector_store = TiDBVectorStore(session=self.db_session, chunk_db_model=self._chunk_model) + vector_index = VectorStoreIndex.from_vector_store( + vector_store, + embed_model=self._embed_model + ) + + retrieve_engine = vector_index.as_retriever( + node_postprocessors=[self._reranker], + similarity_top_k=top_k, + ) + + node_list: List[NodeWithScore] = retrieve_engine.retrieve(question) + return node_list + + def _get_source_documents(self, node_list: List[NodeWithScore]) -> List[DBDocument]: + source_nodes_ids = [s_n.node_id 
for s_n in node_list] + stmt = select(DBDocument).where( + DBDocument.id.in_( + select( + self._chunk_model.document_id, + ).where( + self._chunk_model.id.in_(source_nodes_ids), + ) + ), + ) + + return list(self.db_session.exec(stmt).all()) diff --git a/backend/app/repositories/llm.py b/backend/app/repositories/llm.py index 189d3472..8a229aac 100644 --- a/backend/app/repositories/llm.py +++ b/backend/app/repositories/llm.py @@ -2,23 +2,36 @@ from sqlmodel import select, Session -from app.exceptions import DBLLMNotFoundError +from app.exceptions import DBLLMNotFoundError, DefaultLLMNotFoundError from app.models import ( LLM as DBLLM ) -def get_db_llm(session: Session, llm_id: int) -> Type[DBLLM] | None: - return session.get(DBLLM, llm_id) +class LLMRepo: + model_cls: DBLLM + def get_db_llm(self, session: Session, llm_id: int) -> Type[DBLLM] | None: + return session.get(DBLLM, llm_id) -def must_get_llm(session: Session, llm_id: int) -> Type[DBLLM]: - db_llm = get_db_llm(session, llm_id) - if db_llm is None: - raise DBLLMNotFoundError(llm_id) - return db_llm + def must_get_llm(self, session: Session, llm_id: int) -> Type[DBLLM]: + db_llm = self.get_db_llm(session, llm_id) + if db_llm is None: + raise DBLLMNotFoundError(llm_id) + return db_llm -def get_default_db_llm(session: Session) -> Type[DBLLM] | None: - stmt = select(DBLLM).where(DBLLM.is_default == True).order_by(DBLLM.updated_at.desc()).limit(1) - return session.exec(stmt).first() + + def get_default_llm(self, session: Session) -> Type[DBLLM] | None: + stmt = select(DBLLM).where(DBLLM.is_default == True).order_by(DBLLM.updated_at.desc()).limit(1) + return session.exec(stmt).first() + + + def must_get_default_llm(self, session: Session) -> Type[DBLLM]: + db_llm = self.get_default_llm(session) + if db_llm is None: + raise DefaultLLMNotFoundError() + return db_llm + + +llm_repo = LLMRepo() \ No newline at end of file diff --git a/backend/app/repositories/reranker.py b/backend/app/repositories/reranker.py new file mode 100644 
index 00000000..8a1527f3 --- /dev/null +++ b/backend/app/repositories/reranker.py @@ -0,0 +1,37 @@ +from typing import Type + +from sqlmodel import select, Session + +from app.exceptions import DBRerankerNotFoundError, DefaultRerankerNotFoundError +from app.models import ( + RerankerModel as DBReranker +) + + +class RerankerRepo: + model_cls: DBReranker + + def get_db_reranker(self, session: Session, reranker_id: int) -> Type[DBReranker] | None: + return session.get(DBReranker, reranker_id) + + + def must_get_reranker(self, session: Session, reranker_id: int) -> Type[DBReranker]: + db_reranker = self.get_db_reranker(session, reranker_id) + if db_reranker is None: + raise DBRerankerNotFoundError(reranker_id) + return db_reranker + + + def get_default_reranker(self, session: Session) -> Type[DBReranker] | None: + stmt = select(DBReranker).where(DBReranker.is_default == True).order_by(DBReranker.updated_at.desc()).limit(1) + return session.exec(stmt).first() + + + def must_get_default_reranker(self, session: Session) -> Type[DBReranker]: + db_reranker = self.get_default_reranker(session) + if db_reranker is None: + raise DefaultRerankerNotFoundError() + return db_reranker + + +reranker_repo = RerankerRepo() \ No newline at end of file