diff --git a/.github/workflows/cli-ci.yml b/.github/workflows/cli-ci.yml index c60ba468e..863778e7e 100644 --- a/.github/workflows/cli-ci.yml +++ b/.github/workflows/cli-ci.yml @@ -96,7 +96,7 @@ jobs: services: postgres: - image: postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: grai POSTGRES_PASSWORD: grai diff --git a/.github/workflows/client-ci.yml b/.github/workflows/client-ci.yml index fbc085783..f45ab5963 100644 --- a/.github/workflows/client-ci.yml +++ b/.github/workflows/client-ci.yml @@ -91,7 +91,7 @@ jobs: working-directory: ${{ env.project_dir }} services: postgres: - image: postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: grai POSTGRES_PASSWORD: grai diff --git a/.github/workflows/graph-ci.yml b/.github/workflows/graph-ci.yml index 63546efbe..2eb6a4454 100644 --- a/.github/workflows/graph-ci.yml +++ b/.github/workflows/graph-ci.yml @@ -44,7 +44,7 @@ jobs: working-directory: ${{ env.project_dir }} services: postgres: - image: postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: grai POSTGRES_PASSWORD: grai diff --git a/.github/workflows/integration-bigquery-ci.yml b/.github/workflows/integration-bigquery-ci.yml index 677a85652..06798dd1b 100644 --- a/.github/workflows/integration-bigquery-ci.yml +++ b/.github/workflows/integration-bigquery-ci.yml @@ -46,7 +46,7 @@ jobs: # working-directory: ${{ env.project_dir }} # services: # postgres: -# image: postgres:latest +# image: ankane/pgvector:latest # env: # POSTGRES_USER: grai # POSTGRES_PASSWORD: grai diff --git a/.github/workflows/integration-fivetran-ci.yml b/.github/workflows/integration-fivetran-ci.yml index c642f155f..a9fe106ca 100755 --- a/.github/workflows/integration-fivetran-ci.yml +++ b/.github/workflows/integration-fivetran-ci.yml @@ -39,7 +39,7 @@ jobs: working-directory: ${{ env.project_dir }} services: postgres: - image: postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: grai POSTGRES_PASSWORD: grai diff --git a/.github/workflows/integration-flat_file-ci.yml b/.github/workflows/integration-flat_file-ci.yml index d7de76a6c..beb5d1e9e 100644 --- a/.github/workflows/integration-flat_file-ci.yml +++ b/.github/workflows/integration-flat_file-ci.yml @@ -46,7 +46,7 @@ jobs: working-directory: ${{ env.project_dir }} services: postgres: - image: postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: grai POSTGRES_PASSWORD: grai diff --git a/.github/workflows/integration-postgres-ci.yml b/.github/workflows/integration-postgres-ci.yml index f9bbdf554..0005dd2c1 100644 --- a/.github/workflows/integration-postgres-ci.yml +++ b/.github/workflows/integration-postgres-ci.yml @@ -48,7 +48,7 @@ jobs: working-directory: ${{ env.project_dir }} services: postgres: - image: postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: grai POSTGRES_PASSWORD: grai diff --git a/.github/workflows/integration-snowflake-ci.yml b/.github/workflows/integration-snowflake-ci.yml index 77d4839b8..e10eeef0a 100644 --- a/.github/workflows/integration-snowflake-ci.yml +++ b/.github/workflows/integration-snowflake-ci.yml @@ -48,7 +48,7 @@ jobs: # working-directory: ${{ env.project_dir }} # services: # postgres: -# image: postgres:latest +# image: ankane/pgvector:latest # env: # POSTGRES_USER: grai # POSTGRES_PASSWORD: grai diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 602f2d934..a930394df 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -77,7 +77,7 @@ jobs: services: postgres: - image: 
postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: grai POSTGRES_PASSWORD: grai diff --git a/.github/workflows/server-ci.yml b/.github/workflows/server-ci.yml index 73fdf4b87..e788c065f 100644 --- a/.github/workflows/server-ci.yml +++ b/.github/workflows/server-ci.yml @@ -52,7 +52,7 @@ jobs: services: postgres: - image: postgres:latest + image: ankane/pgvector:latest env: POSTGRES_USER: ${{ env.DB_USER }} POSTGRES_PASSWORD: ${{ env.DB_PASSWORD }} diff --git a/docs/pages/deployment/docker.mdx b/docs/pages/deployment/docker.mdx index 6c2c1328a..19407daa4 100644 --- a/docs/pages/deployment/docker.mdx +++ b/docs/pages/deployment/docker.mdx @@ -84,7 +84,7 @@ version: "3.7" services: db: - image: postgres:14.3-alpine + image: ankane/pgvector environment: - POSTGRES_USER=grai - POSTGRES_PASSWORD=grai diff --git a/docs/pages/deployment/kubernetes/kustomize.mdx b/docs/pages/deployment/kubernetes/kustomize.mdx index cd62dac33..52f8be1a8 100644 --- a/docs/pages/deployment/kubernetes/kustomize.mdx +++ b/docs/pages/deployment/kubernetes/kustomize.mdx @@ -50,7 +50,7 @@ spec: - containerPort: 8000 protocol: TCP - name: db-pod - image: postgres:14.3-alpine + image: ankane/pgvector env: - name: POSTGRES_USER value: grai diff --git a/examples/deployment/docker-compose/complete/docker-compose.yml b/examples/deployment/docker-compose/complete/docker-compose.yml index 44a4695ab..5a5a3e349 100755 --- a/examples/deployment/docker-compose/complete/docker-compose.yml +++ b/examples/deployment/docker-compose/complete/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.7" services: db: - image: postgres:14.3-alpine + image: ankane/pgvector extra_hosts: - "host.docker.internal:host-gateway" environment: diff --git a/examples/deployment/docker-compose/demo/docker-compose.yaml b/examples/deployment/docker-compose/demo/docker-compose.yaml index a0724f3b8..053c45e88 100644 --- a/examples/deployment/docker-compose/demo/docker-compose.yaml +++ b/examples/deployment/docker-compose/demo/docker-compose.yaml @@ -2,7 +2,7 @@ version: "3.7" services: db: - image: postgres:14.3-alpine + image: ankane/pgvector environment: - POSTGRES_USER=grai - POSTGRES_PASSWORD=grai diff --git a/examples/deployment/docker-compose/minimal/docker-compose.yml b/examples/deployment/docker-compose/minimal/docker-compose.yml index 7cd01a31e..82eb6b524 100755 --- a/examples/deployment/docker-compose/minimal/docker-compose.yml +++ b/examples/deployment/docker-compose/minimal/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.7" services: db: - image: postgres:14.3-alpine + image: ankane/pgvector environment: - POSTGRES_USER=grai - POSTGRES_PASSWORD=grai diff --git a/examples/deployment/k8s/deployment.yaml b/examples/deployment/k8s/deployment.yaml index 4bee4a62a..139a2631f 100644 --- a/examples/deployment/k8s/deployment.yaml +++ b/examples/deployment/k8s/deployment.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 8000 protocol: TCP - name: db-pod - image: postgres:14.3-alpine + image: ankane/pgvector env: - name: POSTGRES_USER value: grai diff --git a/grai-client/docker-compose.yml b/grai-client/docker-compose.yml index 94297b0ef..d3d1ac5de 100755 --- a/grai-client/docker-compose.yml +++ b/grai-client/docker-compose.yml @@ -32,7 +32,7 @@ services: start_period: 20s db: - image: postgres:14.3-alpine + image: ankane/pgvector environment: - POSTGRES_USER=grai - POSTGRES_PASSWORD=grai diff --git a/grai-integrations/source-dbt-cloud/docker-compose.yml b/grai-integrations/source-dbt-cloud/docker-compose.yml index 97803550d..83c79333e 100644 --- 
a/grai-integrations/source-dbt-cloud/docker-compose.yml +++ b/grai-integrations/source-dbt-cloud/docker-compose.yml @@ -3,7 +3,7 @@ version: "3.7" services: postgres: - image: postgres:latest + image: ankane/pgvector environment: - POSTGRES_USER=grai - POSTGRES_PASSWORD=grai diff --git a/grai-integrations/source-dbt/docker-compose.yml b/grai-integrations/source-dbt/docker-compose.yml index 97803550d..83c79333e 100644 --- a/grai-integrations/source-dbt/docker-compose.yml +++ b/grai-integrations/source-dbt/docker-compose.yml @@ -3,7 +3,7 @@ version: "3.7" services: postgres: - image: postgres:latest + image: ankane/pgvector environment: - POSTGRES_USER=grai - POSTGRES_PASSWORD=grai diff --git a/grai-integrations/source-metabase/docker-compose.yml b/grai-integrations/source-metabase/docker-compose.yml index 58c4dcc71..129f53c46 100644 --- a/grai-integrations/source-metabase/docker-compose.yml +++ b/grai-integrations/source-metabase/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.8' services: db: - image: postgres:13 + image: ankane/pgvector environment: POSTGRES_PASSWORD: mysecretpassword POSTGRES_USER: metabase diff --git a/grai-integrations/source-mysql/docker-compose.yml b/grai-integrations/source-mysql/docker-compose.yml index f3c6ff7b1..8f1d5b0ae 100644 --- a/grai-integrations/source-mysql/docker-compose.yml +++ b/grai-integrations/source-mysql/docker-compose.yml @@ -22,7 +22,7 @@ services: # retries: 10 # start_period: 20s # db: -# image: postgres:latest +# image: ankane/pgvector # environment: # - POSTGRES_USER=grai # - POSTGRES_PASSWORD=grai diff --git a/grai-integrations/source-postgres/docker-compose.yml b/grai-integrations/source-postgres/docker-compose.yml index 775334bbb..bc20cec6b 100644 --- a/grai-integrations/source-postgres/docker-compose.yml +++ b/grai-integrations/source-postgres/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.7" services: postgres: - image: "postgres:latest" + image: ankane/pgvector volumes: - ./dev-database/schemas:/docker-entrypoint-initdb.d environment: diff --git a/grai-server/app/ai_build_script.py b/grai-server/app/ai_build_script.py index d0c42143f..fbbfe4f96 100644 --- a/grai-server/app/ai_build_script.py +++ b/grai-server/app/ai_build_script.py @@ -2,7 +2,7 @@ import tiktoken -supported_models = ["gpt-4", "gpt-4-32k", "gpt-3.5-turbo"] +supported_models = ["gpt-4", "gpt-4-32k", "gpt-3.5-turbo", "gpt-4-1106-preview"] cache_dir = os.environ.get("TIKTOKEN_CACHE_DIR", None) print("CACHE DIR: ", cache_dir) diff --git a/grai-server/app/connections/adapters/schemas.py b/grai-server/app/connections/adapters/schemas.py index b606afba4..9a375501e 100644 --- a/grai-server/app/connections/adapters/schemas.py +++ b/grai-server/app/connections/adapters/schemas.py @@ -1,5 +1,5 @@ import pprint -from typing import Any, List, Literal, Optional, Sequence, Type +from typing import Any, List, Literal, Optional, Sequence, Type, TypeVar from uuid import UUID from django.db.models import Q @@ -20,6 +20,9 @@ from lineage.models import Edge, Node, Source from workspaces.models import Organisation, Workspace +T = TypeVar("T") +R = TypeVar("R") + def get_data_source_models(data_sources: Sequence[SourceSpec], workspace: Workspace) -> List[Source]: uuids = [source_id for source_id in data_sources if isinstance(source_id, UUID)] @@ -198,7 +201,10 @@ def source_model_to_source_schema(model: Source, schema_type: Literal["SourceV1" @model_to_schema.register def node_model_to_node_v1_schema(model: Node, schema_type: Literal["NodeV1"]) -> NodeV1: # TODO: Add data_sources - return 
NodeV1.from_spec({**model.__dict__, "data_sources": []}) + + data_sources: list[SourceV1] = model_to_schema(model.data_sources.all(), "SourceV1") + result = NodeV1.from_spec({**model.__dict__, "data_sources": [source.spec for source in data_sources]}) + return result @model_to_schema.register @@ -220,11 +226,11 @@ def edge_model_to_edge_v1_schema(model: Edge, schema_type: Literal["EdgeV1"]) -> @model_to_schema.register -def sequence_model_to_sequence_v1_schema(models: list | tuple, schema_type: str) -> list | tuple: - iter = (model_to_schema(model, schema_type) for model in models) - return type(models)(iter) +def sequence_model_to_sequence_v1_schema(models: list[T] | tuple[T], schema_type: str) -> list[R]: + result = list(model_to_schema(model, schema_type) for model in models) + return result @model_to_schema.register -def queryset_to_sequence_v1_schema(models: QuerySet, schema_type: str) -> list: +def queryset_to_sequence_v1_schema(models: QuerySet[T], schema_type: str) -> list[R]: return [model_to_schema(model, schema_type) for model in models] diff --git a/grai-server/app/connections/tests/test_tasks.py b/grai-server/app/connections/tests/test_tasks.py index cce33b66e..ff0b3d070 100644 --- a/grai-server/app/connections/tests/test_tasks.py +++ b/grai-server/app/connections/tests/test_tasks.py @@ -265,12 +265,7 @@ def test_run_update_server_postgres_wrong_password(self, test_workspace, test_po assert run.status == "error" assert run.metadata["error"] == "Incorrect password" - assert ( - run.metadata["message"] - == 'connection to server at "localhost" (127.0.0.1), port 5432 failed: FATAL: password authentication failed for user "grai"\n' - or run.metadata["message"] - == 'connection to server at "127.0.0.1", port 5432 failed: FATAL: password authentication failed for user "grai"\n' - ) + assert run.metadata["message"].endswith('FATAL: password authentication failed for user "grai"\n') def test_run_update_server_postgres_no_database(self, test_workspace, test_postgres_connector, test_source): connection = Connection.objects.create( @@ -294,12 +289,7 @@ def test_run_update_server_postgres_no_database(self, test_workspace, test_postg assert run.status == "error" assert run.metadata["error"] == "Missing permission" - assert ( - run.metadata["message"] - == 'connection to server at "localhost" (127.0.0.1), port 5432 failed: FATAL: database "wrong" does not exist\n' - or run.metadata["message"] - == 'connection to server at "127.0.0.1", port 5432 failed: FATAL: database "wrong" does not exist\n' - ) + assert run.metadata["message"].endswith('FATAL: database "wrong" does not exist\n') def test_run_update_server_no_connector(self, test_workspace, test_connector, test_source): connection = Connection.objects.create( diff --git a/grai-server/app/grAI/authentication.py b/grai-server/app/grAI/authentication.py index 633692b8f..2da7e40c1 100644 --- a/grai-server/app/grAI/authentication.py +++ b/grai-server/app/grAI/authentication.py @@ -31,7 +31,6 @@ async def __call__(self, scope, receive, send): raise PermissionDeniedError("You do not have permission to access this resource.") scope.setdefault("metadata", {}) - scope["metadata"]["workspace_id"] = workspace_id scope["metadata"]["membership"] = membership return await super().__call__(scope, receive, send) diff --git a/grai-server/app/grAI/chat_implementations.py b/grai-server/app/grAI/chat_implementations.py index 672e09118..5ecfcf247 100644 --- a/grai-server/app/grAI/chat_implementations.py +++ b/grai-server/app/grAI/chat_implementations.py @@ 
-1,27 +1,44 @@ -import copy import json import logging -import operator import uuid -from abc import ABC, abstractmethod -from functools import cached_property, partial, reduce -from itertools import accumulate -from typing import Annotated, Any, Callable, Literal, ParamSpec, Type, TypeVar, Union -import itertools + +from typing import Any, Callable, ParamSpec, TypeVar, Coroutine + +from openai.types.chat import ChatCompletion +from openai.types.completion_usage import CompletionUsage import openai +from grAI.utils import get_token_limit, chunker import tiktoken from django.conf import settings from django.core.cache import cache -from django.db.models import Q -from grai_schemas.serializers import GraiYamlSerializer -from pydantic import BaseModel, Field +import asyncio +from workspaces.models import Workspace from channels.db import database_sync_to_async -from connections.adapters.schemas import model_to_schema + from grAI.models import Message -from lineage.models import Edge, Node -from workspaces.models import Workspace +from grAI.chat_types import ( + UserMessage, + SystemMessage, + FunctionMessage, + ChatMessages, + UsageMessage, + ChatMessage, + SupportedMessageTypes, + to_gpt, +) +from grAI.tools import ( + NodeLookupAPI, + EmbeddingSearchAPI, + NHopQueryAPI, + InvalidAPI, + EdgeLookupAPI, + FuzzyMatchNodesAPI, + SourceLookupAPI, +) +from grAI.summarization import ProgressiveSummarization, ToolSummarization, GraiSummarization +from grAI.utils import compute_total_tokens logging.basicConfig(level=logging.DEBUG) @@ -32,372 +49,16 @@ R = TypeVar("R") P = ParamSpec("P") -RoleType = Union[Literal["user"], Literal["system"], Literal["assistant"]] - - -class BaseMessage(BaseModel): - role: str - content: str - token_length: int - - def representation(self) -> dict: - return {"role": self.role, "content": self.content} - - def chunk_content(self, n_chunks: int = 2) -> list[str]: - if self.token_length is None: - raise ValueError("Cannot chunk content without a token length") - - chunk_size = self.token_length // n_chunks - chunks = [self.content[i : i + chunk_size] for i in range(0, len(self.content), chunk_size)] - return chunks - - -class UserMessage(BaseMessage): - role: Literal["user"] = "user" - - -class SystemMessage(BaseMessage): - role: Literal["system"] = "system" - - -class AIMessage(BaseMessage): - role: Literal["assistant"] = "assistant" - - -class FunctionMessage(BaseMessage): - role: Literal["function"] = "function" - name: str - - def representation(self) -> dict: - return {"role": self.role, "content": self.content, "name": self.name} - - -class ChatMessage(BaseModel): - message: Union[UserMessage, SystemMessage, AIMessage, FunctionMessage] - - -class ChatMessages(BaseModel): - messages: list[BaseMessage] - - def to_gpt(self) -> list[dict]: - return [message.representation() for message in self.messages] - - def __getitem__(self, index): - return self.messages[index] - - def __len__(self) -> int: - return len(self.messages) - - def append(self, item): - self.messages.append(item) - - def extend(self, items): - self.messages.extend(items) - - def token_length(self) -> int: - return sum(m.token_length for m in self.messages) - - -def get_token_limit(model_type: str) -> int: - OPENAI_TOKEN_LIMITS = {"gpt-4": 8192, "gpt-3.5-turbo": 4096, "gpt-3.5-turbo-16k": 16385, "gpt-4-32k": 32768} - - if model_type in OPENAI_TOKEN_LIMITS: - return OPENAI_TOKEN_LIMITS[model_type] - elif model_type.endswith("k"): - return int(model_type.split("-")[-1]) * 1024 - elif 
model_type.startswith("gpt-4"): - return 8192 - elif model_type.startswith("gpt-3.5"): - return 4096 - else: - return 2049 - - -class API(ABC): - schema_model: BaseModel - description: str - id: str - - @abstractmethod - def call(self, **kwargs) -> (Any, str): - pass - - def serialize(self, result) -> str: - if isinstance(result, str): - return result - - return GraiYamlSerializer.dump(result) - - async def response(self, **kwargs) -> str: - logging.info(f"Calling {self.id} with {kwargs}") - obj, message = await self.call(**kwargs) - - logging.info(f"Building Response message for {self.id} with {kwargs}") - if message is None: - result = self.serialize(obj) - else: - result = f"{self.serialize(obj)}\n{message}" - - return result - - def gpt_definition(self) -> dict: - return {"name": self.id, "description": self.description, "parameters": self.schema_model.schema()} - - -class NodeIdentifier(BaseModel): - name: str = Field(description="The name of the node to query for") - namespace: str = Field(description="The namespace of the node to query for") - - -class NodeLookup(BaseModel): - nodes: list[NodeIdentifier] = Field(description="A list of nodes to lookup") - - -class NodeLookupAPI(API): - id = "node_lookup" - description = "Lookup metadata about one or more nodes if you know precisely which node(s) to lookup" - schema_model = NodeLookup - - def __init__(self, workspace: str | uuid.UUID): - self.workspace = workspace - self.query_limit = MAX_RETURN_LIMIT - - @staticmethod - def response_message(result_set: list[Node]) -> str | None: - total_results = len(result_set) - if total_results == 0: - message = "No results found matching these query conditions." - else: - message = None - - return message - - @database_sync_to_async - def call(self, **kwargs) -> (list[Node], str | None): - try: - validation = self.schema_model(**kwargs) - except: - return [], "Invalid input. Please check your input and try again." - q_objects = (Q(**node.dict(exclude_none=True)) for node in validation.nodes) - query = reduce(operator.or_, q_objects) - result_set = Node.objects.filter(workspace=self.workspace).filter(query).order_by("-created_at").all() - response_items = model_to_schema(result_set[: self.query_limit], "NodeV1") - return response_items, self.response_message(result_set) - - -class FuzzyMatchQuery(BaseModel): - string: str = Field(description="The fuzzy string used to search amongst node names") - - -class FuzzyMatchNodesAPI(API): - id = "node_fuzzy_lookup" - description = "Performs a fuzzy search for nodes matching a name regardless of namespace" - schema_model = FuzzyMatchQuery - - def __init__(self, workspace: str | uuid.UUID): - self.workspace = workspace - self.query_limit = MAX_RETURN_LIMIT - - @staticmethod - def response_message(result_set: list[Node]) -> str | None: - total_results = len(result_set) - if total_results == 0: - message = "No results found matching these query conditions." 
- else: - message = None - - return message - - @database_sync_to_async - def call(self, string: str) -> (list, str | None): - result_set = ( - Node.objects.filter(workspace=self.workspace).filter(name__contains=string).order_by("-created_at").all() - ) - response_items = [{"name": node.name, "namespace": node.namespace} for node in result_set] - - return response_items, self.response_message(result_set) - - -class EdgeLookupSchema(BaseModel): - source: uuid.UUID | None = Field(description="The primary key of the source node on an edge", default=None) - destination: uuid.UUID | None = Field( - description="The primary key of the destination node on an edge", default=None - ) - - -class MultiEdgeLookup(BaseModel): - edges: list[EdgeLookupSchema] = Field( - description="List of edges to lookup. Edges can be uniquely identified by a (name, namespace) tuple, or by a (source, destination) tuple of the nodes the edge connects" - ) - - -class EdgeLookupAPI(API): - id = "edge_lookup" - description = """ - This function Supports looking up edges from a data lineage graph. For example, a query with name=Test but no - namespace value will return all edges explicitly named "Test" regardless of namespace. - Edges are uniquely identified both by their (name, namespace), and by the (source, destination) nodes they connect. - """ - schema_model = MultiEdgeLookup - - def __init__(self, workspace: str | uuid.UUID): - self.workspace = workspace - self.query_limit = MAX_RETURN_LIMIT - - @staticmethod - def response_message(result_set: list[Edge]) -> str | None: - total_results = len(result_set) - if total_results == 0: - message = "No results found matching these query conditions." - else: - message = None - - return message - - @database_sync_to_async - def call(self, **kwargs) -> (list[Edge], str | None): - validation = self.schema_model(**kwargs) - q_objects = (Q(**node.dict(exclude_none=True)) for node in validation.edges) - query = reduce(operator.or_, q_objects) - result_set = Edge.objects.filter(workspace=self.workspace).filter(query).all()[: self.query_limit] - return model_to_schema(result_set[: self.query_limit], "EdgeV1"), self.response_message(result_set) - - -class EdgeFuzzyLookupSchema(BaseModel): - name__contains: str | None = Field( - description="The name of the edge to lookup perform a fuzzy search on", default=None - ) - namespace__contains: str | None = Field( - description="The namespace of the edge to lookup perform a fuzzy search on", default=None - ) - is_active: bool | None = Field(description="Whether or not the edge is active", default=True) - - -class MultiFuzzyEdgeLookup(BaseModel): - edges: list[EdgeLookupSchema] = Field( - description="List of edges to lookup. Edges can be uniquely identified by a (name, namespace) tuple, or by a (source, destination) tuple of the nodes the edge connects" - ) - - -class EdgeFuzzyLookupAPI(EdgeLookupAPI): - id = "edge_fuzzy_lookup" - description = """ - This function Supports looking up edges from a data lineage graph. For example, a query with name__contains=test - but no namespace value will return all edges whose names contain the substring "test" regardless of namespace. - Edges are uniquely identified both by their (name, namespace), and by the (source, destination) nodes they connect. 
- """ - schema_model = MultiFuzzyEdgeLookup - - -class NodeEdgeSerializer: - def __init__(self, nodes, edges): - self.nodes = nodes - self.edges = edges - - def representation(self, path=None): - items = [item.spec for item in (*self.nodes, *self.edges)] - return GraiYamlSerializer.dump(items, path) - - def __str__(self): - return self.representation() - - -class NHopQuerySchema(BaseModel): - name: str = Field(description="The name of the node to query for") - namespace: str = Field(description="The namespace of the node to query for") - n: int = Field(description="The number of hops to query for", default=1) - - -class NHopQueryAPI(API): - id: str = "n_hop_query" - description: str = "query for nodes and edges within a specified number of hops from a given node" - schema_model = NHopQuerySchema - - def __init__(self, workspace: str | uuid.UUID): - self.workspace = workspace - - @staticmethod - def response_message(result_set: list[str]) -> str | None: - total_results = len(result_set) - if total_results == 0: - message = "No results found matching these query conditions." - else: - message = "Results are returned in the following format: (source.name, source.namespace) -> (destination.name, destination.namespace)" - - return message - - @staticmethod - def filter(queryset: list[Edge], source_nodes: list[Node], dest_nodes: list[Node]) -> tuple[list[Node], list[Node]]: - def get_id(node: Node) -> tuple[str, str]: - return node.name, node.namespace - - source_ids: set[T] = {get_id(node) for node in source_nodes} - dest_ids: set[T] = {get_id(node) for node in dest_nodes} - query_hashes: set[tuple[T, T]] = {(get_id(node.source), get_id(node.destination)) for node in queryset} - - source_resp = [n.destination for n, hashes in zip(queryset, query_hashes) if hashes[0] in source_ids] - dest_resp = [n.source for n, hashes in zip(queryset, query_hashes) if hashes[1] in dest_ids] - return source_resp, dest_resp - - @database_sync_to_async - def call(self, **kwargs) -> (list[Edge | Node], str | None): - def edge_label(edge: Edge): - return f"({edge.source.name}, {edge.source.namespace}) -> ({edge.destination.name}, {edge.destination.namespace})" - - try: - inp = self.schema_model(**kwargs) - except Exception as e: - return [], f"Invalid input: {e}" - - source_node = Node.objects.filter(workspace__id=self.workspace, name=inp.name, namespace=inp.namespace).first() - if source_node is None: - return [], self.response_message([]) - - source_nodes = [source_node] - dest_nodes = [source_node] - - return_edges = [] - for i in range(inp.n): - query = Q(source__in=source_nodes) | Q(destination__in=dest_nodes) - edges = Edge.objects.filter(query).select_related("source", "destination").all() - - source_nodes, dest_nodes = self.filter(edges, source_nodes, dest_nodes) - return_edges.extend((edge_label(item) for item in edges)) - - if len(source_nodes) == 0 and len(dest_nodes) == 0: - break - - return return_edges, self.response_message(return_edges) - - -class InvalidApiSchema(BaseModel): - pass - - -class InvalidAPI(API): - id = "invalid_api_endpoint" - description = "placeholder" - schema_model = InvalidApiSchema - - def __init__(self, apis): - self.function_string = ", ".join([f"`{api.id}`" for api in apis]) - - @database_sync_to_async - def call(self, *args, **kwargs): - return f"Invalid API Endpoint. That function does not exist. 
The supported apis are {self.function_string}" - def serialize(self, result): - return result - - -class FakeEncoder: - def encode(self, text): - return [1, 2, 3, 4] - - -def pre_compute_graph(workspace: str | uuid.UUID): - query_filter = Q(workspace=workspace) & Q(is_active=True) - edges = Edge.objects.filter(query_filter).all() +class SummaryPrompt: + def __init__(self, encoder: tiktoken.Encoding): + prompt_str = """ + Please summarize this conversation, encoding the most important information a future agent would need to continue + working on the problem with me. Please ensure your response is a + text-based summary of the conversation to this point with all relevant context for the next agent. + """ + self.prompt = SystemMessage(content=prompt_str) + self.token_usage = len(encoder.encode(prompt_str)) class BaseConversation: @@ -405,193 +66,183 @@ def __init__( self, chat_id: str, prompt: str, + client: openai.AsyncOpenAI | None = None, model_type: str = settings.OPENAI_PREFERRED_MODEL, - user: str = str(uuid.uuid4()), functions: list = None, verbose=False, ): if functions is None: functions = [] + if client is None: + client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY, organization=settings.OPENAI_ORG_ID) + + self.client = client self.model_type = model_type - self.token_limit = get_token_limit(self.model_type) self.encoder = tiktoken.encoding_for_model(self.model_type) # self.encoder = FakeEncoder() + self.chat_id = chat_id self.cache_id = f"grAI:chat_id:{chat_id}" - self.system_context = prompt - self.user = user + self.api_functions = {func.id: func for func in functions} - self.verbose = verbose + self.invalid_api = InvalidAPI(self.api_functions.values()) - self.prompt_message = self.build_message(SystemMessage, content=self.system_context) + self.prompt_message = SystemMessage(content=prompt) - def build_message(self, message_type: Type[T], content: str, **kwargs) -> T: - return message_type(content=content, token_length=len(self.encoder.encode(content)), **kwargs) + self.base_tokens = self.get_base_tokens() + self.max_model_tokens = get_token_limit(self.model_type) + self.max_tokens = self.max_model_tokens - self.base_tokens + + def get_base_tokens(self) -> int: + tool_strings = (json.dumps(func, indent=2) for func in self.functions()) + num_tool_tokens = sum(len(self.encoder.encode(tool)) for tool in tool_strings) + num_prompt_tokens = len(self.encoder.encode(self.prompt_message.content)) + return int((num_tool_tokens + num_prompt_tokens) * 1.2) + + def functions(self): + return [func.gpt_definition() for func in self.api_functions.values()] @property - def cached_messages(self) -> list[BaseMessage]: - messages = [ChatMessage(message=message).message for message in cache.get(self.cache_id)] - return messages + async def cached_messages(self) -> list[SupportedMessageTypes]: + cache_result = cache.get(self.cache_id, None) + + if cache_result is None: + return await self.hydrate_chat() + else: + return [ChatMessage(message=message).message for message in cache_result] @cached_messages.setter - def cached_messages(self, values: list[BaseMessage]): - cache.set(self.cache_id, [v.dict() for v in values]) + def cached_messages(self, values: list[SupportedMessageTypes]): + cache.set(self.cache_id, [v.dict(exclude_none=True) for v in values]) @database_sync_to_async - def hydrate_chat(self): - logging.info(f"Hydrating chat history for conversations: {self.chat_id}") - messages = cache.get(self.cache_id, None) - - if messages is None: - logging.info(f"Loading chat history for chat 
{self.chat_id} from database") - messages_iter = ( - {"role": m.role, "content": m.message, "token_length": len(self.encoder.encode(m.message))} - for m in Message.objects.filter(chat_id=self.chat_id).order_by("-created_at").all() - ) - messages_list = [ChatMessage(message=message).message for message in messages_iter] - self.cached_messages = messages_list - - async def summarize(self, messages: list[BaseMessage]) -> AIMessage: - summary_prompt = """ - Please summarize this conversation encoding the most important information a future agent would need to continue - working on the problem with me. Please insure you do not call any functions providing an exclusively - text based summary of the conversation to this point with all relevant context for the next agent. + def hydrate_chat(self) -> list[SupportedMessageTypes]: + """ + Hydration doesn't currently capture function call context or summarization and will need to be updated to do so. """ - message = self.build_message(UserMessage, summary_prompt) - summary_messages = ChatMessages(messages=[*messages, message]) - logging.info(f"Summarizing conversation for chat: {self.chat_id}") - response = await openai.ChatCompletion.acreate( - model=self.model_type, user=self.user, messages=summary_messages.to_gpt() + messages_iter = ( + ChatMessage(message={"role": m.role, "content": m.message}).message + for m in Message.objects.filter(chat_id=self.chat_id).order_by("-created_at").all() ) - - # this is hacky for now - summary_message = self.build_message(AIMessage, response.choices[0].message.content) - return summary_message - - def functions(self): - return [func.gpt_definition() for func in self.api_functions.values()] + messages = [self.prompt_message, *messages_iter] + self.cached_messages = messages + return messages @property - def model(self) -> Callable[P, R]: - model = partial(openai.ChatCompletion.acreate, model=self.model_type, user=self.user) - + def model(self) -> Callable[[list[SupportedMessageTypes]], Coroutine[Any, Any, ChatCompletion]]: + base_kwargs = {"model": self.model_type} if len(functions := self.functions()) > 0: - model = partial(model, functions=functions) - - return model - - async def evaluate_summary(self, messages: ChatMessages) -> ChatMessages: - model_limit = int(self.token_limit * 0.85) - - requires_summary = messages.token_length() > model_limit - while requires_summary: - prev_accumulated_tokens = 0 - accumulated_tokens = 0 - i = 0 - for i, message in enumerate(messages.messages): - accumulated_tokens += message.token_length - if accumulated_tokens > model_limit: - break - prev_accumulated_tokens = accumulated_tokens - - available_tokens = model_limit - prev_accumulated_tokens - message = messages.messages[i] - if i == len(messages) and accumulated_tokens < model_limit: - requires_summary = False - elif available_tokens >= message.token_length: - summary = await self.summarize(messages.messages[: (i + 1)]) - messages = [self.prompt_message, summary, *messages.messages[(i + 1) :]] - else: - encoding = self.encoder.encode(message.content) - message_obj = copy.copy(message) - next_message_obj = copy.copy(message) + base_kwargs |= {"tools": functions, "tool_choice": "auto"} + + async def inner(messages: list[SupportedMessageTypes]) -> ChatCompletion: + try: + response = await self.client.chat.completions.create(messages=to_gpt(messages), **base_kwargs) + except openai.BadRequestError as e: + # Just in case we hit a marginal difference from the calculation above. 
+ if e.code == "context_length_exceeded": + messages = await self.evaluate_summary(messages) + response = await self.client.chat.completions.create(messages=to_gpt(messages), **base_kwargs) + else: + raise e - message_obj.content = self.encoder.decode(encoding[:available_tokens]) - message_obj.token_length = len(encoding[:available_tokens]) - next_message_obj.content = self.encoder.decode(encoding[available_tokens:]) - next_message_obj.token_length = len(encoding[available_tokens:]) + return response - summary = await self.summarize([*messages.messages[:i], message_obj]) - messages = [self.prompt_message, summary, next_message_obj, *messages.messages[i:]] + return inner - messages = ChatMessages(messages=messages) - return messages + async def evaluate_summary( + self, messages: list[SupportedMessageTypes], max_tokens: int | None = None + ) -> list[SupportedMessageTypes]: + max_tokens = max_tokens if max_tokens is not None else self.max_tokens + summarizer = GraiSummarization(model=self.model_type, client=self.client, max_tokens=max_tokens) + results = await summarizer.call(messages) + return [self.prompt_message, *results] async def request(self, user_input: str) -> str: - logging.info(f"Responding to request for: {self.chat_id}") - - messages = ChatMessages( - messages=[self.prompt_message, *self.cached_messages, self.build_message(UserMessage, content=user_input)] - ) - - result = None - stop = False - n = 0 - while not stop: - messages = await self.evaluate_summary(messages) - response = await self.model(messages=messages.to_gpt()) - - if result: - for choice in response.choices: - if result != choice: - result = choice - break - else: - result = response.choices[0] - - if stop := result.finish_reason == "stop": - message = self.build_message(AIMessage, content=result.message.content) - messages.append(message) - elif result.finish_reason == "function_call": - func_id = result.message.function_call.name - func_kwargs = json.loads(result.message.function_call.arguments) - api = self.api_functions.get(func_id, InvalidAPI(self.api_functions.values())) - response = await api.response(**func_kwargs) - - if isinstance(api, InvalidAPI): - message = self.build_message(SystemMessage, response) - messages.append(message) - else: - message = self.build_message(FunctionMessage, content=response, name=func_id) + original_messages: list[SupportedMessageTypes] = await self.cached_messages + messages = original_messages.copy() + + user_query = UserMessage(content=user_input) + messages.append(user_query) + + final_response: str | None = None + while final_response is None: + if compute_total_tokens(messages, self.encoder) > self.max_tokens: + messages = await self.evaluate_summary(messages) + + response = await self.model(messages) + response_choice = response.choices[0] + + if response_choice.finish_reason == "stop": + final_response = response_choice.message.content + elif response_choice.finish_reason == "length": + messages = await self.evaluate_summary(messages) + elif response_choice.finish_reason == "content_filter": + final_response = "Warning: This message was filtered by the content filter." 
+ elif response_choice.finish_reason == "tool_calls": + messages.append(response_choice.message) + for i, tool_call in enumerate(response_choice.message.tool_calls): + func_id = tool_call.function.name + func_kwargs = json.loads(tool_call.function.arguments) + api = self.api_functions.get(func_id, self.invalid_api) + response = await api.response(**func_kwargs) + message = FunctionMessage( + content=response, + name=func_id, + tool_call_id=tool_call.id, + args=func_kwargs, + ) messages.append(message) - elif result.finish_reason == "length": - summary = await self.summarize(messages[:-1]) - messages = ChatMessages(messages=[self.prompt_message, summary, messages[-1]]) else: - # valid values include length, content_filter, null - raise NotImplementedError(f"No stop reason for {result.finish_reason}") + logging.error(f"Encountered an unknown OpenAI finish reason {response_choice.finish_reason}") + final_response = response_choice.message.content - self.cached_messages = messages.messages - return result.message.content + self.cached_messages = messages + return final_response async def get_chat_conversation( - chat_id: str | uuid.UUID, workspace: str | uuid.UUID, model_type: str = settings.OPENAI_PREFERRED_MODEL + chat_id: str | uuid.UUID, workspace: Workspace | uuid.UUID, model_type: str = settings.OPENAI_PREFERRED_MODEL ): chat_prompt = """ You are a helpful assistant with domain expertise about an organizations data and data infrastructure. - - * You know how to query for additional context and metadata about any data in the organization. - * Unique pieces of data like a column in a database is identified by a (name, namespace) tuple or a unique uuid. - * You can help users discover new data or identify and correct issues such as broken data pipelines, and BI dashboards. - * Your responses must use Markdown syntax - * When a user asks you a question about their data you should proactively look up additional context about the data. - * Nodes contain a metadata field with extra context about the node. - * Nodes and Edges are typed. You can identify the type under `metadata.grai.node_type` or `metadata.grai.edge_type` - * If a Node has a type like `Column` with a `TableToColumn` Edge connecting to a `Table` node, the Column node represents a column in the table. - * Node names for databases and datawarehouses are constructed following `{schema}.{table}.{column}` format e.g. a column named `id` in a table named `users` in a schema named `public` would be identified as `public.users.id` + All of that context is embedded in a graph where nodes represent individual data concepts like a database column or + table. Edges in the graph represent relationships between data such as where the data was sourced from. + + Rules you MUST follow: + - Understand the context of a user's request and what they are trying to accomplish. + - If a user asks about specific data, like a column, you will need to exhaustively search for that data. If + you're unable to find results, you should look for related data using other available tools. + - You will verify your responses are accurate and refer to data that exists in their infrastructure. + - Your responses use GitHub-flavored Markdown syntax. + + Data Structure Notes: + - Unique pieces of data like a column in a database are identified by a (name, namespace) tuple or a unique uuid. + - Nodes contain a metadata field with extra context about the node. + - Nodes and Edges are typed. 
You can identify the type under `metadata.grai.node_type` or `metadata.grai.edge_type` + - If a Node has a type like `Column` with a `TableToColumn` Edge connecting to a `Table` node, the Column node + represents a column in the table. + - Node names for databases and data warehouses are constructed following `{schema}.{table}.{column}` format, e.g. a + column named `id` in a table named `users` in a schema named `public` would be identified as `public.users.id`. + - Naming conventions NEVER follow a `{namespace}.{name}` format. Namespace and name will ALWAYS be referred to as + two separate fields. Do not assume a namespace is a schema or a database. """ + client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY, organization=settings.OPENAI_ORG_ID) + + if workspace.ai_enabled: + search_func = EmbeddingSearchAPI(workspace=workspace.id) + else: + search_func = FuzzyMatchNodesAPI(workspace=workspace.id) + functions = [ - NodeLookupAPI(workspace=workspace), - # Todo: edge lookup is broken - # EdgeLookupAPI(workspace=workspace), - FuzzyMatchNodesAPI(workspace=workspace), - NHopQueryAPI(workspace=workspace), + NodeLookupAPI(workspace=workspace.id), + EdgeLookupAPI(workspace=workspace.id), + SourceLookupAPI(workspace=workspace.id), + NHopQueryAPI(workspace=workspace.id), + search_func, ] conversation = BaseConversation( - prompt=chat_prompt, model_type=model_type, functions=functions, chat_id=str(chat_id) + prompt=chat_prompt, model_type=model_type, functions=functions, chat_id=str(chat_id), client=client ) - await conversation.hydrate_chat() return conversation diff --git a/grai-server/app/grAI/chat_types.py b/grai-server/app/grAI/chat_types.py new file mode 100644 index 000000000..8c3ae57aa --- /dev/null +++ b/grai-server/app/grAI/chat_types.py @@ -0,0 +1,121 @@ +from openai.types.chat.chat_completion_message import ChatCompletionMessage +from openai.types.completion_usage import CompletionUsage +from pydantic import BaseModel +from typing import Any, Literal, Union +from multimethod import multimethod + +RoleType = Union[Literal["user"], Literal["system"], Literal["assistant"]] + + +class BaseMessage(BaseModel): + role: str + content: str + + def representation(self) -> dict: + return {"role": self.role, "content": self.content} + + +class UserMessage(BaseMessage): + role: Literal["user"] = "user" + + +class SystemMessage(BaseMessage): + role: Literal["system"] = "system" + + +class AIMessage(BaseMessage): + role: Literal["assistant"] = "assistant" + + +class FunctionMessage(BaseMessage): + role: Literal["tool"] = "tool" + name: str + tool_call_id: str + args: dict + + def representation(self) -> dict: + return {"tool_call_id": self.tool_call_id, "role": self.role, "content": self.content, "name": self.name} + + +SupportedMessageTypes = Union[UserMessage, SystemMessage, AIMessage, FunctionMessage, ChatCompletionMessage] + + +class ChatMessage(BaseModel): + message: SupportedMessageTypes + + +class UsageMessage(BaseModel): + usage: CompletionUsage + message: SupportedMessageTypes + encoding: list | None = None + + +@multimethod +def to_gpt(message: Any) -> dict | ChatCompletionMessage: + raise Exception(f"Cannot convert {type(message)} to GPT format") + + +@to_gpt.register +def list_to_gpt(messages: list) -> list[dict]: + return [to_gpt(message) for message in messages] + + +@to_gpt.register +def usage_message_to_gpt(message: UsageMessage) -> dict: + return to_gpt(message.message) + + +@to_gpt.register +def dict_to_gpt(message: dict) -> dict: + return message + + +@to_gpt.register +def 
base_message_to_gpt(message: BaseMessage) -> dict: + return message.representation() + + +@to_gpt.register +def chat_completion_message_to_gpt(message: ChatCompletionMessage) -> ChatCompletionMessage: + return message + + +class ChatMessages(BaseModel): + messages: list[UsageMessage] + + def to_gpt(self) -> list[dict]: + return to_gpt(self.messages) + + def __getitem__(self, index): + return self.messages[index] + + def __len__(self) -> int: + return len(self.messages) + + def __setitem__(self, key, value): + self.messages[key] = value + self.recompute_usage(key) + + def append(self, item): + self.messages.append(item) + self.recompute_usage(len(self.messages) - 1) + + def extend(self, items): + self.messages.extend(items) + self.recompute_usage(len(self.messages) - len(items)) + + @property + def current_usage(self) -> CompletionUsage: + return self.messages[-1].usage + + def recompute_usage(self, from_index: int = 0): + usage = self.messages[from_index].usage + for message in self.messages[from_index + 1 :]: + message.usage.prompt_tokens = usage.total_tokens + message.usage.total_tokens = usage.total_tokens + message.usage.completion_tokens + + def index_over_token_limit(self, token_limit) -> int: + for index, message in enumerate(self.messages): + if message.usage.total_tokens > token_limit: + return index + return index diff --git a/grai-server/app/grAI/consumers.py b/grai-server/app/grAI/consumers.py index bcd00f3aa..0bf4da344 100644 --- a/grai-server/app/grAI/consumers.py +++ b/grai-server/app/grAI/consumers.py @@ -17,7 +17,7 @@ from grAI.models import Message, MessageRoles, UserChat from grAI.websocket_payloads import ChatErrorMessages, ChatEvent from users.models import User -from workspaces.models import Membership +from workspaces.models import Membership, Workspace from asyncio import gather from grai_schemas.serializers import dump_json @@ -50,8 +50,8 @@ def user(self) -> User: return self.scope["user"] @property - def workspace(self) -> str: - return self.scope["metadata"]["workspace_id"] + def workspace(self) -> Workspace: + return self.membership.workspace @property def membership(self) -> Membership: @@ -59,7 +59,7 @@ def membership(self) -> Membership: @property def group_name(self) -> str: - return f"{self.user.id}_{self.workspace}" + return f"{self.user.id}_{self.workspace.id}" async def connect(self): await self.channel_layer.group_add(self.group_name, self.channel_name) diff --git a/grai-server/app/grAI/encoders.py b/grai-server/app/grAI/encoders.py new file mode 100644 index 000000000..9ba89fa4d --- /dev/null +++ b/grai-server/app/grAI/encoders.py @@ -0,0 +1,46 @@ +import tiktoken +import openai +from typing import TypeVar +from django.conf import settings + + +R = TypeVar("R") + + +class OpenAIEmbedder: + def __init__(self, model: str, context_window: int, client: openai.AsyncOpenAI | None = None): + self.model = model + self.model_context_window = context_window + self.encoder = tiktoken.encoding_for_model(self.model) + if client is None: + client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY, organization=settings.OPENAI_ORG_ID) + self.client: openai.AsyncOpenAI = client + + self.heuristic_max_length = int(self.model_context_window * 4 * 0.85) + + def get_encoding(self, content: str) -> list[int]: + return self.encoder.encode(content) + + def decode(self, encoding: list[int]) -> str: + return self.encoder.decode(encoding) + + def get_max_length_content(self, content: str) -> str: + # Heuristic estimate of the max length of content that can be encoded + if 
len(content) < self.heuristic_max_length: + return content + + encoded = self.get_encoding(content) + if len(encoded) < self.model_context_window: + return content + else: + return self.decode(encoded[: self.model_context_window]) + + async def get_embedding(self, content: str) -> R: + content = self.get_max_length_content(content) + return await self.client.embeddings.create(input=content, model=self.model) + + +if settings.HAS_OPENAI: + Embedder = OpenAIEmbedder("text-embedding-ada-002", 8100) +else: + Embedder = None diff --git a/grai-server/app/grAI/mocks.py b/grai-server/app/grAI/mocks.py new file mode 100644 index 000000000..2ed2f42e4 --- /dev/null +++ b/grai-server/app/grAI/mocks.py @@ -0,0 +1,3 @@ +class FakeEncoder: + def encode(self, text): + return [1, 2, 3, 4] diff --git a/grai-server/app/grAI/summarization.py b/grai-server/app/grAI/summarization.py new file mode 100644 index 000000000..b54a43e4f --- /dev/null +++ b/grai-server/app/grAI/summarization.py @@ -0,0 +1,334 @@ +from grAI.utils import get_token_limit, chunker, tool_segments, ToolSegmentReturnType +from abc import ABC, abstractmethod +import tiktoken +from typing import Any, TypeVar, Coroutine +import openai +from openai import AsyncOpenAI +from django.conf import settings +from asyncio import gather +from grAI.chat_types import SupportedMessageTypes, SystemMessage, FunctionMessage, UserMessage +from openai.types.chat.chat_completion_message import ChatCompletionMessage +from openai.types.chat import ChatCompletion + + +R = TypeVar("R") + + +class ContentLengthError(Exception): + pass + + +class BaseChat(ABC): + def __init__(self, model: str, client: AsyncOpenAI | None = None, max_tokens: int | None = None): + if client is None: + client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY, organization=settings.OPENAI_ORG_ID) + + self.client = client + self.model = model + self.max_tokens = get_token_limit(self.model) if max_tokens is None else max_tokens + + async def completion(self, messages: list[dict] | dict) -> R: + messages = [messages] if isinstance(messages, dict) else messages + return await self.client.chat.completions.create(model=self.model, messages=messages) + + @abstractmethod + async def call(self, *args, **kwargs) -> Any: + pass + + +class BaseSummarizer(ABC): + def __init__( + self, + model: str, + prompt_string: str, + client: AsyncOpenAI | None = None, + max_tokens: int | None = None, + ): + if client is None: + client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY, organization=settings.OPENAI_ORG_ID) + self.prompt_string = prompt_string + self.client = client + self.model = model + self.max_tokens = get_token_limit(self.model) if max_tokens is None else max_tokens + self.encoder = tiktoken.encoding_for_model(self.model) + + async def completion(self, messages: list[dict] | dict) -> ChatCompletion: + messages = [messages] if isinstance(messages, dict) else messages + return await self.client.chat.completions.create(model=self.model, messages=messages) + + def prompt(self, content: SupportedMessageTypes): + return self.prompt_string.format(content=content.content, role=content.role) + + def query(self, content: SupportedMessageTypes) -> dict: + return {"role": "system", "content": self.prompt(content)} + + @abstractmethod + async def call(self, *args, **kwargs) -> Any: + pass + + +DEFAULT_SUMMARIZER_PROMPT = """The following is a conversation between a user and an openai AI agent. +'{content}' +Please distill the conversation such that a future agent can answer the user's question. 
+""" + + +class BasicSummarizer(BaseSummarizer): + def __init__( + self, + model: str, + client: AsyncOpenAI | None = None, + prompt_string: str | None = DEFAULT_SUMMARIZER_PROMPT, + max_tokens: int | None = None, + ): + super().__init__(prompt_string=prompt_string, model=model, client=client, max_tokens=max_tokens) + + async def call(self, input_obj: SupportedMessageTypes) -> str: + query = self.query(input_obj) + self.validate(query["content"]) + response = await self.completion(query) + return response.choices[0].message.content + + def validate(self, content: str) -> list[int]: + encoding = self.encoder.encode(content) + if enc_size := len(encoding) < self.max_tokens: + message = ( + f"The provided prompt is {enc_size} tokens long but the chosen model {self.model} only supports" + f"a maximum of {self.max_tokens} tokens. Please reduce the prompt size." + ) + raise ContentLengthError(message) + + return encoding + + +class ConversationSummarizer(BasicSummarizer): + def __init__( + self, + model: str, + prompt_string: str, + client: AsyncOpenAI | None = None, + max_tokens: int | None = None, + ): + super().__init__(prompt_string=prompt_string, model=model, client=client, max_tokens=max_tokens) + + def prompt(self, content: str | list[SupportedMessageTypes], **kwargs) -> str: + if isinstance(content, list): + content = self.prompt_content(content) + return self.prompt_string.format(content=content, **kwargs) + + @staticmethod + def prompt_content(input_obj: list[SupportedMessageTypes]) -> str: + component_iter = (f"{inp.role}\n---\n{inp.content}" for inp in input_obj) + content = "\n---\n".join(component_iter) + return content + + def query(self, content: list[SupportedMessageTypes] | str, **kwargs) -> dict: + prompt = self.prompt(content=content, **kwargs) + return {"role": "system", "content": prompt} + + async def call(self, input_obj: list[SupportedMessageTypes] | str, **kwargs) -> str: + query = self.query(input_obj, **kwargs) + self.validate(query["content"]) + response = await self.completion(query) + return response.choices[0].message.content + + +DEFAULT_REDUCE_PROMPT = DEFAULT_SUMMARIZER_PROMPT + + +class Reduce(ConversationSummarizer): + def __init__( + self, + model: str, + prompt_string: str = DEFAULT_REDUCE_PROMPT, + client: AsyncOpenAI | None = None, + max_tokens: int | None = None, + ): + super().__init__(prompt_string=prompt_string, model=model, client=client, max_tokens=max_tokens) + + async def call(self, items: list[SupportedMessageTypes], **kwargs) -> str: + query = self.query(items, **kwargs) + encoding = self.validate(query["content"]) + response = await self.completion(query) + return response.choices[0].message.content + + def validate(self, content: str) -> list[int]: + encoding = self.encoder.encode(content) + if enc_size := len(encoding) < self.max_tokens: + message = ( + f"The provided prompt is {enc_size} tokens long but the chosen model {self.model} only supports" + f"a maximum of {self.max_tokens} tokens. Please reduce the prompt size." 
+ ) + raise ContentLengthError(message) + return encoding + + +DEFAULT_MAP_PROMPT = DEFAULT_SUMMARIZER_PROMPT + + +class Map(ConversationSummarizer): + def __init__( + self, + model: str, + prompt_string: str = DEFAULT_MAP_PROMPT, + client: AsyncOpenAI | None = None, + max_tokens: int | None = None, + ): + super().__init__(prompt_string=prompt_string, model=model, client=client, max_tokens=max_tokens) + + async def call(self, items: list[SupportedMessageTypes], **kwargs) -> list[str]: + encoding = self.encoder.encode(self.prompt_content(items)) + + queries = (self.query(self.encoder.decode(chunk)) for chunk in chunker(encoding, self.max_tokens - 50)) + responses = await gather(*[self.completion(query) for query in queries]) + return [resp.choices[0].message.content for resp in responses] + + +class MapReduceSummarization(BaseChat): + def __init__(self, *args, map: Map, reduce: Reduce, **kwargs): + self.map = map + self.reduce = reduce + super().__init__(*args, **kwargs) + + async def call(self, items: list[SupportedMessageTypes], **kwargs) -> str: + reduction: str | None = None + while reduction is None: + items = [SystemMessage(content=content) for content in await self.map.call(items, **kwargs)] + + try: + reduction = await self.reduce.call(items, **kwargs) + except ContentLengthError: + pass + + return reduction + + +DEFAULT_PROGRESSIVE_PROMPT = DEFAULT_SUMMARIZER_PROMPT + + +class ProgressiveSummarization(ConversationSummarizer): + def __init__( + self, + model: str, + prompt_string: str = DEFAULT_PROGRESSIVE_PROMPT, + client: AsyncOpenAI | None = None, + max_tokens: int | None = None, + ): + super().__init__(prompt_string=prompt_string, model=model, client=client, max_tokens=max_tokens) + + async def call(self, items: list[SupportedMessageTypes], **kwargs) -> str: + content = self.prompt(items, **kwargs) + encoding = self.encoder.encode(content) + while len(encoding) > self.max_tokens: + query = self.query(self.encoder.decode(encoding[: self.max_tokens]), **kwargs) + response = await self.completion(query) + + content = "\n".join([response.choices[0].message.content, self.encoder.decode(encoding[self.max_tokens :])]) + encoding = self.encoder.encode(self.prompt_string.format(content=content, **kwargs)) + + return content + + +class ToolSummarization(BaseChat): + def __init__(self, strategy: ProgressiveSummarization | MapReduceSummarization, **kwargs): + self.strategy = strategy + kwargs.setdefault("client", self.strategy.client) + kwargs.setdefault("model", self.strategy.model) + kwargs.setdefault("max_tokens", self.strategy.max_tokens) + + super().__init__(**kwargs) + + @staticmethod + def tool_segments(items: list[SupportedMessageTypes]) -> ToolSegmentReturnType: + return tool_segments(items) + + async def call(self, items: list[SupportedMessageTypes], **kwargs) -> str: + if len(items) == 0: + return "" + + segment: list[SupportedMessageTypes] = [] + for pre_tool_segment, tool_segment in self.tool_segments(items): + content = await self.strategy.call([*segment, *pre_tool_segment], **kwargs) + pre_tool_context = SystemMessage(content=content) + + if tool_segment is not None: + tool_summary = await self.strategy.call([pre_tool_context, *tool_segment], **kwargs) + segment = [SystemMessage(content=tool_summary)] + else: + segment = [pre_tool_context] + + return segment[0].content + + +DEFAULT_GRAI_PROMPT = """The following is a conversation between a user and an openai AI agent. 
+
+
+DEFAULT_GRAI_PROMPT = """The following is a conversation between a user and an OpenAI AI agent.
+'{content}'
+The user is attempting to answer the following question:
+'{question}'
+Please distill the conversation to its essence including all information needed by a future agent to
+answer the user's question.
+"""
+
+
+class GraiSummarization(BaseChat):
+    def __init__(self, model: str, client: openai.AsyncOpenAI, max_tokens: int):
+        self.progressive = ProgressiveSummarization(
+            model=model, prompt_string=DEFAULT_GRAI_PROMPT, client=client, max_tokens=max_tokens
+        )
+
+        self.map_reduce = MapReduceSummarization(
+            model=model,
+            client=client,
+            max_tokens=max_tokens,
+            map=Map(model=model, client=client, max_tokens=max_tokens, prompt_string=DEFAULT_GRAI_PROMPT),
+            reduce=Reduce(model=model, client=client, max_tokens=max_tokens, prompt_string=DEFAULT_GRAI_PROMPT),
+        )
+
+        question_request_prompt = """The following is a conversation between a user and an OpenAI AI agent.
+        '{content}'
+        Please identify the problem the user needs help with. Your response MUST be written as if you were the user and
+        end in a question mark.
+        """
+
+        self.conversation = ConversationSummarizer(
+            model=model, client=client, max_tokens=max_tokens, prompt_string=question_request_prompt
+        )
+        self.tool = ToolSummarization(strategy=self.progressive)
+        self.max_tokens = max_tokens
+        super().__init__(model=model, client=client, max_tokens=max_tokens)
+
+    @staticmethod
+    def user_messages(items: list[SupportedMessageTypes]) -> int:
+        # Walk backwards to locate the index of the most recent user message.
+        i = 0
+
+        for i, item in enumerate(items[::-1]):
+            if item.role == "user":
+                break
+        idx = len(items) - 1 - i
+        return idx
+
+    async def get_question(self, items: list[SupportedMessageTypes]) -> UserMessage:
+        last_user_message_idx = self.user_messages(items)
+        encoding = self.conversation.encoder.encode(self.conversation.prompt(items[: last_user_message_idx + 1]))
+
+        content = self.conversation.encoder.decode(encoding[-min(self.max_tokens, 1000) :])
+        response = await self.conversation.completion({"role": "system", "content": content})
+
+        return UserMessage(content=response.choices[0].message.content)
+
+    async def call(self, items: list[SupportedMessageTypes]) -> list[SystemMessage | UserMessage]:
+        user_question = await self.get_question(items)
+
+        # Temporarily shrink the tool summarizer's budget to leave room for the question
+        # (plus a small buffer) in the final context.
+        tool_max_tokens = self.tool.max_tokens
+        self.tool.max_tokens = tool_max_tokens - len(self.tool.strategy.encoder.encode(user_question.content)) - 10
+        summary = await self.tool.call(items, question=user_question.content)
+        self.tool.max_tokens = tool_max_tokens
+
+        responses = [SystemMessage(content=summary), user_question]
+        return responses
+
+    prompt = """
+    You've requested a tool to help you with your problem, however the response from the tool was too long
+    to fit in the context window. The tool response requested was {message.message.name} with arguments
+    {message.message.args}. Please provide a brief description of the details you're looking for which a future
+    agent will use to summarize the tool response. Ensure you do not actually call any tools in your response.
+    """
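+
+# Hypothetical wiring, assuming an AsyncOpenAI `client` and the token-limit helper from grAI.utils:
+#
+#     from grAI.utils import get_token_limit
+#
+#     summarizer = GraiSummarization(model="gpt-4", client=client, max_tokens=get_token_limit("gpt-4"))
+#     context = await summarizer.call(messages)  # -> [SystemMessage(summary), UserMessage(question)]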
+ """ diff --git a/grai-server/app/grAI/tests/test_authentication.py b/grai-server/app/grAI/tests/test_authentication.py index 86409662c..08b91c0ac 100644 --- a/grai-server/app/grAI/tests/test_authentication.py +++ b/grai-server/app/grAI/tests/test_authentication.py @@ -20,7 +20,6 @@ async def test_workspace_path_auth_middleware(self, user, workspace, membership, connected, _ = await communicator.connect() assert connected assert "metadata" in communicator.scope - assert communicator.scope["metadata"]["workspace_id"] == str(workspace.id) assert communicator.scope["metadata"]["membership"] == membership @pytest.mark.asyncio diff --git a/grai-server/app/grAI/tests/test_tools.py b/grai-server/app/grAI/tests/test_tools.py new file mode 100644 index 000000000..d0ded217c --- /dev/null +++ b/grai-server/app/grAI/tests/test_tools.py @@ -0,0 +1,17 @@ +from grAI.tools import NHopQueryAPI +import asyncio +from workspaces.models import Workspace, Organisation +import pytest +from lineage.models import Node +from django_multitenant.utils import set_current_tenant + + +# @pytest.mark.django_db(transaction=True) +# def test_n_hop_query(): +# +# set_current_tenant(Organisation.objects.get(name="default")) +# workspace = Workspace.objects.get(name="default") +# api = NHopQueryAPI(workspace.id) +# call_args = api.schema_model(name="grai_bigquery_demo.customers", namespace="default", request_context="") +# result = asyncio.run(api.call(**call_args.dict())) +# diff --git a/grai-server/app/grAI/tests/test_utils.py b/grai-server/app/grAI/tests/test_utils.py new file mode 100644 index 000000000..dda762af4 --- /dev/null +++ b/grai-server/app/grAI/tests/test_utils.py @@ -0,0 +1,150 @@ +from grAI.utils import tool_segments +from grAI.chat_types import SystemMessage, AIMessage, FunctionMessage, UserMessage +from openai.types.chat.chat_completion_message import ChatCompletionMessage +from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall +import pytest + + +example_tool_call = ChatCompletionMessageToolCall( + id="tool_call_1", function={"arguments": "", "name": ""}, type="function" +) +chat_completion = ChatCompletionMessage(content=None, role="assistant", tool_calls=[example_tool_call]) + +example_tool_call2 = ChatCompletionMessageToolCall( + id="tool_call_2", function={"arguments": "", "name": ""}, type="function" +) +chat_completion2 = ChatCompletionMessage(content=None, role="assistant", tool_calls=[example_tool_call2]) + + +class TestToolSegmentation: + def test_no_tools(self): + messages = [ + UserMessage(content="Hello"), + AIMessage(content="How are you?"), + UserMessage(content="I'm good, how are you?"), + AIMessage(content="I'm good too!"), + ] + result = list(tool_segments(messages)) + assert len(result) == 1 + assert result[0][0] == messages + assert result[0][1] is None + + def test_no_tools_all_message_types(self): + messages = [ + SystemMessage(content="Hello"), + UserMessage(content="Hello"), + AIMessage(content="How are you?"), + UserMessage(content="I'm good, how are you?"), + ChatCompletionMessage(content="I'm good too!", role="assistant"), + ] + result = list(tool_segments(messages)) + assert len(result) == 1 + assert result[0][0] == messages + assert result[0][1] is None + + def test_messages_with_tools(self): + messages = [ + SystemMessage(content="Hello"), + UserMessage(content="Hello"), + chat_completion, + FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_1", args={}), + ] + result = list(tool_segments(messages)) 
+
+    def test_messages_with_tools_multiple(self):
+        messages = [
+            SystemMessage(content="Hello"),
+            UserMessage(content="Hello"),
+            chat_completion,
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_1", args={}),
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_2", args={}),
+        ]
+        result = list(tool_segments(messages))
+        assert len(result) == 1
+        assert result[0][0] == messages[0:2]
+        assert result[0][1] == messages[-2:]
+
+    def test_messages_with_tools_multiple2(self):
+        messages = [
+            SystemMessage(content="Hello"),
+            UserMessage(content="Hello"),
+            chat_completion,
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_1", args={}),
+            chat_completion2,
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_2", args={}),
+        ]
+        list(tool_segments(messages))
+
+    def test_messages_with_multiple_tool_segments(self):
+        messages = [
+            SystemMessage(content="Hello 1"),
+            UserMessage(content="Hello 1"),
+            chat_completion,
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_1", args={}),
+            SystemMessage(content="Hello 2"),
+            UserMessage(content="Hello 2"),
+            chat_completion,
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_2", args={}),
+        ]
+        result = list(tool_segments(messages))
+        assert len(result) == 2
+        assert result[0][0] == messages[0:2]
+        assert result[0][1] == [messages[3]]
+        assert result[1][0] == messages[4:6]
+        assert result[1][1] == [messages[-1]]
+
+    def test_message_starts_with_completion(self):
+        messages = [
+            chat_completion,
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_1", args={}),
+            SystemMessage(content="Hello 1"),
+            UserMessage(content="Hello 1"),
+        ]
+        result = list(tool_segments(messages))
+        assert len(result) == 2
+        assert result[0][0] == []
+        assert result[0][1] == [messages[1]]
+        assert result[1][0] == messages[2:]
+        assert result[1][1] is None
+
+    @pytest.mark.xfail(strict=True)
+    def test_message_starting_with_tool(self):
+        messages = [
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_1", args={}),
+            SystemMessage(content="Hello 1"),
+            UserMessage(content="Hello 1"),
+            chat_completion,
+        ]
+        result = list(tool_segments(messages))
+
+    @pytest.mark.xfail(strict=True)
+    def test_message_starting_with_tool_completion(self):
+        messages = [
+            chat_completion,
+            SystemMessage(content="Hello 1"),
+            UserMessage(content="Hello 1"),
+        ]
+        result = list(tool_segments(messages))
+
+    @pytest.mark.xfail(strict=True)
+    def test_message_with_tool_without_completion(self):
+        messages = [
+            SystemMessage(content="Hello 1"),
+            UserMessage(content="Hello 1"),
+            FunctionMessage(content="I'm good too!", role="tool", name="test", tool_call_id="tool_call_1", args={}),
+        ]
+        result = list(tool_segments(messages))
+
+    @pytest.mark.xfail(strict=True)
+    def test_message_with_completion_without_tool(self):
+        messages = [
+            SystemMessage(content="Hello 1"),
+            UserMessage(content="Hello 1"),
+            chat_completion,
+            SystemMessage(content="Hello 1"),
+            UserMessage(content="Hello 1"),
+        ]
+        result = list(tool_segments(messages))
diff --git a/grai-server/app/grAI/tools.py b/grai-server/app/grAI/tools.py
new file mode 100644
index 000000000..da2ba681c
--- /dev/null
+++ b/grai-server/app/grAI/tools.py
@@ -0,0 +1,404 @@
+import uuid
+from abc import ABC, abstractmethod
+
+from lineage.models import Edge, Node, NodeEmbeddings, Source
+from pydantic import BaseModel, Field
+from grai_schemas.serializers import GraiYamlSerializer
+from django.db.models import Q
+from channels.db import database_sync_to_async
+from pgvector.django import MaxInnerProduct
+
+from typing import Annotated, Any, Callable, Literal, ParamSpec, Type, TypeVar, Union
+from connections.adapters.schemas import model_to_schema
+import tiktoken
+import openai
+from grai_schemas.v1.edge import EdgeV1
+from grAI.encoders import Embedder
+
+
+T = TypeVar("T")
+R = TypeVar("R")
+P = ParamSpec("P")
+
+RoleType = Union[Literal["user"], Literal["system"], Literal["assistant"]]
+
+
+def filter_node_content(node: "Node") -> dict:
+    spec_keys = ["name", "namespace", "metadata", "data_sources"]
+
+    result: dict = model_to_schema(node, "NodeV1").spec.dict(exclude_none=True)
+    result = {key: result[key] for key in spec_keys}
+    result["metadata"] = result["metadata"]["grai"]
+    return result
+
+
+class API(ABC):
+    schema_model: Type[BaseModel]
+    description: str
+    id: str
+
+    @abstractmethod
+    def call(self, **kwargs) -> tuple[Any, str]:
+        pass
+
+    def serialize(self, result) -> str:
+        if isinstance(result, str):
+            return result
+
+        return GraiYamlSerializer.dump(result)
+
+    async def response(self, **kwargs) -> str:
+        obj, message = await self.call(**kwargs)
+
+        if message is None:
+            result = self.serialize(obj)
+        else:
+            result = f"{self.serialize(obj)}\n{message}"
+
+        return result
+
+    def gpt_definition(self) -> dict:
+        return {
+            "type": "function",
+            "function": {"name": self.id, "description": self.description, "parameters": self.schema_model.schema()},
+        }
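+
+    # For reference, gpt_definition() produces the OpenAI function-tool payload shape,
+    # e.g. (illustrative values only):
+    #
+    #     {"type": "function",
+    #      "function": {"name": "node_lookup", "description": "...", "parameters": {...json schema...}}}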
+
+
+class NodeIdentifier(BaseModel):
+    name: str = Field(description="The node's name")
+    namespace: str = Field(description="The node's namespace")
+    request_context: str = Field(description="A brief description of the relevant data needed from the node.")
+
+
+class NodeLookupAPI(API):
+    id = "node_lookup"
+    description = "Lookup metadata about one or more nodes if you know precisely which node to look up"
+    schema_model = NodeIdentifier
+
+    def __init__(self, workspace: str | uuid.UUID):
+        self.workspace = workspace
+
+    @staticmethod
+    def response_message(result_set: list) -> str | None:
+        total_results = len(result_set)
+        if total_results == 0:
+            message = "No results found matching the query."
+        else:
+            message = None
+
+        return message
+
+    @database_sync_to_async
+    def call(self, **kwargs) -> tuple[list[dict], str | None]:
+        def reduce_response(response: Node) -> dict:
+            try:
+                parsed: dict = model_to_schema(response, "NodeV1").spec.dict()
+            except Exception:
+                return {}
+
+            spec_keys = ["name", "namespace", "display_name", "metadata", "data_sources"]
+
+            reduced_response = {key: parsed[key] for key in spec_keys}
+            reduced_response["metadata"] = reduced_response["metadata"]["grai"]
+            return reduced_response
+
+        validation = self.schema_model(**kwargs)
+        query = Q(name=validation.name, namespace=validation.namespace)
+        result_query = Node.objects.filter(workspace=self.workspace).filter(query).prefetch_related("data_sources")
+        response_items = [reduce_response(node) for node in result_query.all()]
+
+        return response_items, self.response_message(response_items)
+
+
+class SourceIdentifier(BaseModel):
+    name: str | None = Field(description="The name of the source to lookup or None to return all sources", default=None)
+
+
+class SourceLookupAPI(API):
+    id = "source_lookup"
+    description = "Lookup metadata about one or more sources if you know precisely which source to look up"
+    schema_model = SourceIdentifier
+
+    def __init__(self, workspace: str | uuid.UUID):
+        self.workspace = workspace
+
+    def response_message(self, result_set: list) -> str | None:
+        total_results = len(result_set)
+        if total_results == 0:
+            message = "No results found matching the query."
+        else:
+            message = None
+
+        return message
+
+    @database_sync_to_async
+    def call(self, **kwargs) -> tuple[list[dict], str | None]:
+        def reduce_response(response: Node) -> dict:
+            try:
+                parsed: dict = model_to_schema(response, "SourceV1").spec.dict()
+            except Exception:
+                parsed = {}
+
+            return parsed
+
+        validation = self.schema_model(**kwargs)
+        query = Q()
+        if validation.name is not None:
+            query |= Q(name=validation.name)
+
+        result_query = Source.objects.filter(workspace=self.workspace).filter(query)
+        response_items = [reduce_response(source) for source in result_query.all()]
+
+        return response_items, self.response_message(response_items)
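+
+# Hypothetical call path, assuming a workspace UUID is in scope:
+#
+#     api = SourceLookupAPI(workspace=workspace_id)
+#     text = await api.response(name="my_source")  # serialized YAML plus any guidance message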
+
+
+class FuzzyMatchQuery(BaseModel):
+    string: str = Field(description="The fuzzy string used to search amongst node names")
+
+
+class FuzzyMatchNodesAPI(API):
+    id = "node_fuzzy_lookup"
+    description = "Performs a fuzzy search for nodes matching a name regardless of namespace"
+    schema_model = FuzzyMatchQuery
+
+    def __init__(self, workspace: str | uuid.UUID):
+        self.workspace = workspace
+
+    @staticmethod
+    def response_message(result_set: list[Node]) -> str | None:
+        total_results = len(result_set)
+        if total_results == 0:
+            message = "No results found matching these query conditions."
+        else:
+            message = None
+
+        return message
+
+    @database_sync_to_async
+    def call(self, string: str) -> tuple[list[dict], str | None]:
+        # "Fuzzy" here is case-sensitive substring matching via __contains, not edit distance.
+        query = Q(name__contains=string) | Q(display_name__contains=string)
+        result_set = Node.objects.filter(workspace=self.workspace).filter(query).order_by("-created_at").all()
+        response_items = [
+            {"name": node.name, "namespace": node.namespace, "display_name": node.display_name} for node in result_set
+        ]
+
+        return response_items, self.response_message(result_set)
+
+
+class EdgeLookupSchema(BaseModel):
+    source: NodeIdentifier = Field(description="The primary key of the source node on an edge")
+    destination: NodeIdentifier = Field(description="The primary key of the destination node on an edge")
+    request_context: str = Field(description="A brief description of the relevant data needed about the node.")
+
+
+class EdgeLookupAPI(API):
+    id = "edge_lookup"
+    description = """
+    This function supports looking up edges from a data lineage graph by the nodes they connect.
+    Edges are uniquely identified both by their (name, namespace), and by the (source, destination) nodes they connect.
+    """
+    schema_model = EdgeLookupSchema
+
+    def __init__(self, workspace: str | uuid.UUID):
+        self.workspace = workspace
+
+    @staticmethod
+    def response_message(result_set: list[Edge]) -> str | None:
+        total_results = len(result_set)
+        if total_results == 0:
+            message = "No results found matching these query conditions."
+        else:
+            message = None
+
+        return message
+
+    @database_sync_to_async
+    def call(self, **kwargs) -> tuple[list[EdgeV1], str | None]:
+        try:
+            validation = self.schema_model(**kwargs)
+        except Exception as e:
+            return [], f"Invalid input. {e}"
+
+        query = Q()
+        query &= Q(source__name=validation.source.name, source__namespace=validation.source.namespace)
+        query &= Q(
+            destination__name=validation.destination.name, destination__namespace=validation.destination.namespace
+        )
+
+        results = Edge.objects.filter(workspace=self.workspace).filter(query).all()
+        return model_to_schema(results, "EdgeV1"), self.response_message(results)
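+
+# Example arguments (hypothetical node keys; nested dicts are coerced into NodeIdentifier):
+#
+#     await EdgeLookupAPI(workspace_id).call(
+#         source={"name": "raw.users", "namespace": "default", "request_context": ""},
+#         destination={"name": "staging.users", "namespace": "default", "request_context": ""},
+#         request_context="",
+#     )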
+
+
+class NHopQuerySchema(BaseModel):
+    name: str = Field(description="The name of the node to query for")
+    namespace: str = Field(description="The namespace of the node to query for")
+    n: int = Field(description="The number of hops to query for", default=1)
+    request_context: str = Field(description="A brief description of the relevant data needed about the node.")
+
+
+class NHopQueryAPI(API):
+    id: str = "n_hop_query"
+    description: str = """Query for a list of edges up to n hops in the graph from a node.
+    Edges are identified (source, destination node, edge type) e.g.
+
+    RESPONSE:
+    (name1,namespace1),(name2,namespace2),edge_type
+    ...
+    """
+    schema_model = NHopQuerySchema
+
+    def __init__(self, workspace: str | uuid.UUID):
+        self.workspace = workspace
+
+    @staticmethod
+    def response_message(result_set: list[str]) -> str | None:
+        total_results = len(result_set)
+        if total_results == 0:
+            message = "No results found matching these query conditions."
+        else:
+            message = None
+
+        return message
+
+    @staticmethod
+    def filter(queryset: list[Edge], source_nodes: list[Node], dest_nodes: list[Node]) -> tuple[list[Node], list[Node]]:
+        def get_id(node: Edge | Node) -> tuple[str, str]:
+            return node.name, node.namespace
+
+        source_ids: set[tuple[str, str]] = {get_id(node) for node in source_nodes}
+        dest_ids: set[tuple[str, str]] = {get_id(node) for node in dest_nodes}
+        # Build the (source, destination) key list in queryset order; a set would lose the
+        # positional correspondence that zip() relies on below.
+        query_hashes: list[tuple[tuple[str, str], tuple[str, str]]] = [
+            (get_id(edge.source), get_id(edge.destination)) for edge in queryset
+        ]
+
+        source_resp = [edge.destination for edge, hashes in zip(queryset, query_hashes) if hashes[0] in source_ids]
+        dest_resp = [edge.source for edge, hashes in zip(queryset, query_hashes) if hashes[1] in dest_ids]
+        return source_resp, dest_resp
+
+    @database_sync_to_async
+    def call(self, **kwargs) -> tuple[list[dict], str | None]:
+        def filter_edge(edge: EdgeV1) -> dict:
+            result = {
+                "name": edge.spec.name,
+                "namespace": edge.spec.namespace,
+                "source": edge.spec.source,
+                "destination": edge.spec.destination,
+                "edge_type": edge.spec.metadata.grai.edge_type,
+            }
+            return result
+
+        try:
+            inp = self.schema_model(**kwargs)
+        except Exception as e:
+            return [], f"Invalid input: {e}"
+
+        source_node = Node.objects.filter(workspace__id=self.workspace, name=inp.name, namespace=inp.namespace).first()
+        if source_node is None:
+            return [], self.response_message([])
+
+        source_nodes = [source_node]
+        dest_nodes = [source_node]
+
+        return_edges = []
+        for i in range(inp.n):
+            query = Q(source__in=source_nodes) | Q(destination__in=dest_nodes)
+            edges = Edge.objects.filter(query).prefetch_related("source", "destination").all()
+
+            source_nodes, dest_nodes = self.filter(edges, source_nodes, dest_nodes)
+            return_edges.extend(list(edges))
+
+            if len(source_nodes) == 0 and len(dest_nodes) == 0:
+                break
+
+        results = set(model_to_schema(return_edges, "EdgeV1"))
+        results = [filter_edge(item) for item in results]
+        return results, self.response_message(return_edges)
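+
+# Hypothetical invocation (request_context has no default, so it must be supplied):
+#
+#     edges, note = await NHopQueryAPI(workspace_id).call(
+#         name="raw.users", namespace="default", n=2, request_context=""
+#     )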
{e}", None + + embedding_resp = await Embedder.get_embedding(inp.search_term) + embedding = list(embedding_resp.data[0].embedding) + + neighbors = await self.nearest_neighbor_search(embedding, inp.limit) + response = ((n.name, n.namespace, n.metadata["grai"]["node_type"]) for n in neighbors) + response_str = "\n".join([", ".join(vals) for vals in response]) + + return response_str, self.response_message(neighbors) + + +class LoadGraph(API): + id = "load_graph" + description = "" + schema_model = BaseModel + + def __init__(self, workspace): + self.workspace = workspace + + @database_sync_to_async + def call(self): + nodes = model_to_schema(Node.objects.filter(workspace=self.workspace).all(), "NodeV1") + edges = model_to_schema(Edge.objects.filter(workspace=self.workspace).all(), "EdgeV1") + + return [*nodes, *edges], None + + +class InvalidApiSchema(BaseModel): + pass + + +class InvalidAPI(API): + id = "invalid_api_endpoint" + description = "placeholder" + schema_model = InvalidApiSchema + + def __init__(self, apis): + self.function_string = ", ".join([f"`{api.id}`" for api in apis]) + + @database_sync_to_async + def call(self, *args, **kwargs) -> tuple[str, str | None]: + return ( + f"Invalid API Endpoint. That function does not exist. The supported apis are {self.function_string}", + None, + ) diff --git a/grai-server/app/grAI/utils.py b/grai-server/app/grAI/utils.py new file mode 100644 index 000000000..eafec6780 --- /dev/null +++ b/grai-server/app/grAI/utils.py @@ -0,0 +1,83 @@ +from itertools import islice +from multimethod import multimethod +import tiktoken +from typing import Any, Iterable +from grAI.chat_types import SupportedMessageTypes, BaseMessage, FunctionMessage +from openai.types.chat.chat_completion_message import ChatCompletionMessage + +OPENAI_TOKEN_LIMITS = { + "gpt-3.5-turbo": 4096, + "gpt-3.5-turbo-16k": 16385, + "gpt-3.5-turbo-1106": 16385, + "gpt-4": 8192, + "gpt-4-0613": 8192, + "gpt-4-32k": 32768, + "gpt-4-1106-preview": 128000, +} + + +def get_token_limit(model_type: str) -> int: + if model_type in OPENAI_TOKEN_LIMITS: + return OPENAI_TOKEN_LIMITS[model_type] + elif model_type.endswith("k"): + return int(model_type.split("-")[-1]) * 1024 + elif model_type.startswith("gpt-4"): + return 8192 + elif model_type.startswith("gpt-3.5"): + return 4096 + else: + return 2049 + + +def chunker(it, size): + iterator = iter(it) + while chunk := list(islice(iterator, size)): + yield chunk + + +def get_message_token_count(message, encoder) -> int: + if message.content is not None: + return len(encoder.encode(message.content)) + elif isinstance(message, ChatCompletionMessage) and message.tool_calls is not None: + return sum(len(encoder.encode(call.json())) for call in message.tool_calls) + else: + raise ValueError("Message must have either content or tool_calls") + + +def compute_total_tokens(messages: list[SupportedMessageTypes], encoder: tiktoken.Encoding) -> int: + return sum(get_message_token_count(message, encoder) for message in messages) + + +ToolSegmentReturnType = tuple[list[SupportedMessageTypes], list[FunctionMessage] | None] + + +def tool_segments(items: list[SupportedMessageTypes]) -> list[ToolSegmentReturnType]: + pre_tool_segment: list = [] + tool_segment: list | None = None + result = [] + for item in items: + if tool_segment is None: + if isinstance(item, ChatCompletionMessage) and item.tool_calls is not None: + tool_segment = [] + # pre_tool_segment = [] + elif item.role == "tool": + raise ValueError("Encountered a tool response message without a preceding tool 
call message.") + else: + pre_tool_segment.append(item) + else: + if item.role == "tool": + tool_segment.append(item) + elif len(tool_segment) == 0: + raise ValueError(f"Encountered a tool call message without any subsequent tool responses.") + else: + result.append((pre_tool_segment, tool_segment)) + # yield pre_tool_segment, tool_segment + if isinstance(item, ChatCompletionMessage) and item.tool_calls is not None: + tool_segment = [] + pre_tool_segment = [] + else: + pre_tool_segment = [item] + tool_segment = None + result.append((pre_tool_segment, tool_segment)) + return result + # yield pre_tool_segment, tool_segment diff --git a/grai-server/app/lineage/admin.py b/grai-server/app/lineage/admin.py index 4a9c18baa..fae538ba4 100755 --- a/grai-server/app/lineage/admin.py +++ b/grai-server/app/lineage/admin.py @@ -7,7 +7,7 @@ from common.admin.fields.json_widget import PrettyJSONWidget from connections.models import Connection, Run -from .models import Edge, Event, Filter, Node, Source +from .models import Edge, Event, Filter, Node, Source, NodeEmbeddings @admin.action(description="Force delete selected sources") @@ -198,8 +198,30 @@ class SourceAdmin(admin.ModelAdmin): ] +def update_embedding(modeladmin, request, queryset): # pragma: no cover + from lineage.tasks import update_node_vector_index + + for embedding in queryset: + embedding.update_embedding() + + +class NodeEmbeddingAdmin(admin.ModelAdmin): + list_display = ( + "pk", + "natural_key", + "created_at", + "updated_at", + ) + search_fields = ["node", "created_at", "updated_at"] + + actions = [ + update_embedding, + ] + + admin.site.register(Node, NodeAdmin) admin.site.register(Edge, EdgeAdmin) admin.site.register(Filter, FilterAdmin) admin.site.register(Event) admin.site.register(Source, SourceAdmin) +admin.site.register(NodeEmbeddings, NodeEmbeddingAdmin) diff --git a/grai-server/app/lineage/migrations/0018_vector_embeddings.py.bak b/grai-server/app/lineage/migrations/0018_nodeembeddings.py similarity index 81% rename from grai-server/app/lineage/migrations/0018_vector_embeddings.py.bak rename to grai-server/app/lineage/migrations/0018_nodeembeddings.py index 16dee4d63..c4a537e08 100644 --- a/grai-server/app/lineage/migrations/0018_vector_embeddings.py.bak +++ b/grai-server/app/lineage/migrations/0018_nodeembeddings.py @@ -1,5 +1,5 @@ -# Generated by Django 4.2.6 on 2023-10-31 17:27 - +# Generated by Django 4.2.7 on 2023-12-01 23:29 +from pgvector.django import VectorExtension from django.db import migrations, models import django.db.models.deletion import pgvector.django @@ -11,7 +11,7 @@ class Migration(migrations.Migration): ] operations = [ - pgvector.django.VectorExtension(), + VectorExtension(), migrations.CreateModel( name="NodeEmbeddings", fields=[ @@ -25,6 +25,8 @@ class Migration(migrations.Migration): to="lineage.node", ), ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), ], options={ "indexes": [ diff --git a/grai-server/app/lineage/models.py b/grai-server/app/lineage/models.py index a5644715c..e8bdc881f 100755 --- a/grai-server/app/lineage/models.py +++ b/grai-server/app/lineage/models.py @@ -36,6 +36,9 @@ class Node(TenantModel): # "users.User", related_name="created_by", on_delete=models.PROTECT # ) + def natural_key(self) -> tuple[uuid.UUID, str, str]: + return self.workspace.id, self.namespace, self.name + def search_type(self): return self.metadata.get("grai", {}).get("node_type", "Node") @@ -55,9 +58,6 @@ def save(self, *args, **kwargs): 
super().save(*args, **kwargs) self.cache_model() - # if self.workspace.ai_enabled and settings.HAS_OPENAI: - # update_node_vector_index.delay(self.id) - def delete(self, *args, **kwargs): super().delete(*args, **kwargs) self.cache_model(delete=True) @@ -96,24 +96,32 @@ class Meta: ] -# class NodeEmbeddings(models.Model): -# embedding = VectorField(dimensions=1536) -# node = models.OneToOneField( -# Node, -# on_delete=models.CASCADE, -# primary_key=True, -# ) -# -# class Meta: -# indexes = [ -# HnswIndex( -# name="node_embedding_index", -# fields=["embedding"], -# m=64, -# ef_construction=128, # should be at least 2x m. -# opclasses=["vector_ip_ops"], -# ) -# ] +class NodeEmbeddings(models.Model): + embedding = VectorField(dimensions=1536) + node = models.OneToOneField( + Node, + on_delete=models.CASCADE, + primary_key=True, + ) + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + def natural_key(self): + return self.node.natural_key() + + def update_embedding(self): + update_node_vector_index.delay(self.node.id) + + class Meta: + indexes = [ + HnswIndex( + name="node_embedding_index", + fields=["embedding"], + m=64, + ef_construction=128, # should be at least 2x m. + opclasses=["vector_ip_ops"], + ) + ] class Edge(TenantModel): diff --git a/grai-server/app/lineage/tasks.py b/grai-server/app/lineage/tasks.py index 06686fd30..c5aba91b0 100644 --- a/grai-server/app/lineage/tasks.py +++ b/grai-server/app/lineage/tasks.py @@ -1,15 +1,21 @@ import logging from datetime import datetime -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, TypeVar from uuid import UUID +import asyncio import openai -from django.apps import apps from django.core.cache import cache from django_celery_beat.models import PeriodicTask, PeriodicTasks from grai_schemas.serializers import GraiYamlSerializer from celery import shared_task +from grAI.encoders import Embedder + + +T = TypeVar("T") +R = TypeVar("R") + if TYPE_CHECKING: from lineage.models import Node @@ -19,13 +25,23 @@ class EmbeddingTaskStatus: WAIT = 0 -def create_node_vector_index(node: "Node"): +def get_embedded_node_content(node: "Node") -> str: from connections.adapters.schemas import model_to_schema + + spec_keys = ["name", "namespace", "metadata", "data_sources"] + + result: dict = model_to_schema(node, "NodeV1").spec.dict() + result = {key: result[key] for key in spec_keys} + result["metadata"] = result["metadata"]["grai"] + content = GraiYamlSerializer.dump(result) + return content + + +def create_node_vector_index(node: "Node"): from lineage.models import NodeEmbeddings - schema = model_to_schema(node, "NodeV1") - content = GraiYamlSerializer.dump(schema) - embedding_resp = openai.Embedding.create(input=content, model="text-embedding-ada-002") + content = get_embedded_node_content(node) + embedding_resp = asyncio.run(Embedder.get_embedding(content)) NodeEmbeddings.objects.update_or_create(node=node, embedding=embedding_resp.data[0].embedding) @@ -49,10 +65,10 @@ def update_node_vector_index(self, node_id: UUID, task_id: UUID | None = None): self.retry(countdown=10) return - node = Node.objects.get(id=node_id) + node = Node.objects.prefetch_related("data_sources").get(id=node_id) try: create_node_vector_index(node) - except openai.error.RateLimitError: + except openai.RateLimitError: logging.info(f"Openai rate limit reach retrying in 10 seconds") self.retry(countdown=10) return @@ -65,5 +81,5 @@ def bulk_update_embeddings(): task = 
PeriodicTask.objects.get(name="lineage:Node:bulk_update_embeddings") last_run_at = task.last_run_at if task.last_run_at is not None else datetime.min - for node in Node.objects.filter(updated_at__gt=last_run_at).all(): - update_node_vector_index.delay(node.id) + for node_id in Node.objects.filter(updated_at__gt=last_run_at).values_list("id", flat=True): + update_node_vector_index.delay(node_id) diff --git a/grai-server/app/poetry.lock b/grai-server/app/poetry.lock index 548a3d56f..4e5ff3292 100644 --- a/grai-server/app/poetry.lock +++ b/grai-server/app/poetry.lock @@ -1,114 +1,5 @@ # This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand. -[[package]] -name = "aiohttp" -version = "3.9.1" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1f80197f8b0b846a8d5cf7b7ec6084493950d0882cc5537fb7b96a69e3c8590"}, - {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72444d17777865734aa1a4d167794c34b63e5883abb90356a0364a28904e6c0"}, - {file = "aiohttp-3.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b05d5cbe9dafcdc733262c3a99ccf63d2f7ce02543620d2bd8db4d4f7a22f83"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c4fa235d534b3547184831c624c0b7c1e262cd1de847d95085ec94c16fddcd5"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:289ba9ae8e88d0ba16062ecf02dd730b34186ea3b1e7489046fc338bdc3361c4"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bff7e2811814fa2271be95ab6e84c9436d027a0e59665de60edf44e529a42c1f"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81b77f868814346662c96ab36b875d7814ebf82340d3284a31681085c051320f"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b9c7426923bb7bd66d409da46c41e3fb40f5caf679da624439b9eba92043fa6"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8d44e7bf06b0c0a70a20f9100af9fcfd7f6d9d3913e37754c12d424179b4e48f"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22698f01ff5653fe66d16ffb7658f582a0ac084d7da1323e39fd9eab326a1f26"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ca7ca5abfbfe8d39e653870fbe8d7710be7a857f8a8386fc9de1aae2e02ce7e4"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:8d7f98fde213f74561be1d6d3fa353656197f75d4edfbb3d94c9eb9b0fc47f5d"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5216b6082c624b55cfe79af5d538e499cd5f5b976820eac31951fb4325974501"}, - {file = "aiohttp-3.9.1-cp310-cp310-win32.whl", hash = "sha256:0e7ba7ff228c0d9a2cd66194e90f2bca6e0abca810b786901a569c0de082f489"}, - {file = "aiohttp-3.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:c7e939f1ae428a86e4abbb9a7c4732bf4706048818dfd979e5e2839ce0159f23"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:df9cf74b9bc03d586fc53ba470828d7b77ce51b0582d1d0b5b2fb673c0baa32d"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecca113f19d5e74048c001934045a2b9368d77b0b17691d905af18bd1c21275e"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:8cef8710fb849d97c533f259103f09bac167a008d7131d7b2b0e3a33269185c0"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea94403a21eb94c93386d559bce297381609153e418a3ffc7d6bf772f59cc35"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91c742ca59045dce7ba76cab6e223e41d2c70d79e82c284a96411f8645e2afff"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c93b7c2e52061f0925c3382d5cb8980e40f91c989563d3d32ca280069fd6a87"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee2527134f95e106cc1653e9ac78846f3a2ec1004cf20ef4e02038035a74544d"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11ff168d752cb41e8492817e10fb4f85828f6a0142b9726a30c27c35a1835f01"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b8c3a67eb87394386847d188996920f33b01b32155f0a94f36ca0e0c635bf3e3"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c7b5d5d64e2a14e35a9240b33b89389e0035e6de8dbb7ffa50d10d8b65c57449"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:69985d50a2b6f709412d944ffb2e97d0be154ea90600b7a921f95a87d6f108a2"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:c9110c06eaaac7e1f5562caf481f18ccf8f6fdf4c3323feab28a93d34cc646bd"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737e69d193dac7296365a6dcb73bbbf53bb760ab25a3727716bbd42022e8d7a"}, - {file = "aiohttp-3.9.1-cp311-cp311-win32.whl", hash = "sha256:4ee8caa925aebc1e64e98432d78ea8de67b2272252b0a931d2ac3bd876ad5544"}, - {file = "aiohttp-3.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:a34086c5cc285be878622e0a6ab897a986a6e8bf5b67ecb377015f06ed316587"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f800164276eec54e0af5c99feb9494c295118fc10a11b997bbb1348ba1a52065"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:500f1c59906cd142d452074f3811614be04819a38ae2b3239a48b82649c08821"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0b0a6a36ed7e164c6df1e18ee47afbd1990ce47cb428739d6c99aaabfaf1b3af"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69da0f3ed3496808e8cbc5123a866c41c12c15baaaead96d256477edf168eb57"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176df045597e674fa950bf5ae536be85699e04cea68fa3a616cf75e413737eb5"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b796b44111f0cab6bbf66214186e44734b5baab949cb5fb56154142a92989aeb"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f27fdaadce22f2ef950fc10dcdf8048407c3b42b73779e48a4e76b3c35bca26c"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcb6532b9814ea7c5a6a3299747c49de30e84472fa72821b07f5a9818bce0f66"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:54631fb69a6e44b2ba522f7c22a6fb2667a02fd97d636048478db2fd8c4e98fe"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:4b4c452d0190c5a820d3f5c0f3cd8a28ace48c54053e24da9d6041bf81113183"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:cae4c0c2ca800c793cae07ef3d40794625471040a87e1ba392039639ad61ab5b"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:565760d6812b8d78d416c3c7cfdf5362fbe0d0d25b82fed75d0d29e18d7fc30f"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54311eb54f3a0c45efb9ed0d0a8f43d1bc6060d773f6973efd90037a51cd0a3f"}, - {file = "aiohttp-3.9.1-cp312-cp312-win32.whl", hash = "sha256:85c3e3c9cb1d480e0b9a64c658cd66b3cfb8e721636ab8b0e746e2d79a7a9eed"}, - {file = "aiohttp-3.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:11cb254e397a82efb1805d12561e80124928e04e9c4483587ce7390b3866d213"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8a22a34bc594d9d24621091d1b91511001a7eea91d6652ea495ce06e27381f70"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:598db66eaf2e04aa0c8900a63b0101fdc5e6b8a7ddd805c56d86efb54eb66672"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c9376e2b09895c8ca8b95362283365eb5c03bdc8428ade80a864160605715f1"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41473de252e1797c2d2293804e389a6d6986ef37cbb4a25208de537ae32141dd"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c5857612c9813796960c00767645cb5da815af16dafb32d70c72a8390bbf690"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffcd828e37dc219a72c9012ec44ad2e7e3066bec6ff3aaa19e7d435dbf4032ca"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:219a16763dc0294842188ac8a12262b5671817042b35d45e44fd0a697d8c8361"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f694dc8a6a3112059258a725a4ebe9acac5fe62f11c77ac4dcf896edfa78ca28"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bcc0ea8d5b74a41b621ad4a13d96c36079c81628ccc0b30cfb1603e3dfa3a014"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:90ec72d231169b4b8d6085be13023ece8fa9b1bb495e4398d847e25218e0f431"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:cf2a0ac0615842b849f40c4d7f304986a242f1e68286dbf3bd7a835e4f83acfd"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0e49b08eafa4f5707ecfb321ab9592717a319e37938e301d462f79b4e860c32a"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2c59e0076ea31c08553e868cec02d22191c086f00b44610f8ab7363a11a5d9d8"}, - {file = "aiohttp-3.9.1-cp38-cp38-win32.whl", hash = "sha256:4831df72b053b1eed31eb00a2e1aff6896fb4485301d4ccb208cac264b648db4"}, - {file = "aiohttp-3.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:3135713c5562731ee18f58d3ad1bf41e1d8883eb68b363f2ffde5b2ea4b84cc7"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cfeadf42840c1e870dc2042a232a8748e75a36b52d78968cda6736de55582766"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:70907533db712f7aa791effb38efa96f044ce3d4e850e2d7691abd759f4f0ae0"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cdefe289681507187e375a5064c7599f52c40343a8701761c802c1853a504558"}, - {file = 
"aiohttp-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7481f581251bb5558ba9f635db70908819caa221fc79ee52a7f58392778c636"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49f0c1b3c2842556e5de35f122fc0f0b721334ceb6e78c3719693364d4af8499"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d406b01a9f5a7e232d1b0d161b40c05275ffbcbd772dc18c1d5a570961a1ca4"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d8e4450e7fe24d86e86b23cc209e0023177b6d59502e33807b732d2deb6975f"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0266cd6f005e99f3f51e583012de2778e65af6b73860038b968a0a8888487a"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab221850108a4a063c5b8a70f00dd7a1975e5a1713f87f4ab26a46e5feac5a0e"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c88a15f272a0ad3d7773cf3a37cc7b7d077cbfc8e331675cf1346e849d97a4e5"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:237533179d9747080bcaad4d02083ce295c0d2eab3e9e8ce103411a4312991a0"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:02ab6006ec3c3463b528374c4cdce86434e7b89ad355e7bf29e2f16b46c7dd6f"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04fa38875e53eb7e354ece1607b1d2fdee2d175ea4e4d745f6ec9f751fe20c7c"}, - {file = "aiohttp-3.9.1-cp39-cp39-win32.whl", hash = "sha256:82eefaf1a996060602f3cc1112d93ba8b201dbf5d8fd9611227de2003dddb3b7"}, - {file = "aiohttp-3.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:9b05d33ff8e6b269e30a7957bd3244ffbce2a7a35a81b81c382629b80af1a8bf"}, - {file = "aiohttp-3.9.1.tar.gz", hash = "sha256:8fc49a87ac269d4529da45871e2ffb6874e87779c3d0e2ccd813c0899221239d"}, -] - -[package.dependencies] -aiosignal = ">=1.1.2" -attrs = ">=17.3.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns", "brotlicffi"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" - [[package]] name = "algoliasearch" version = "2.6.3" @@ -154,13 +45,13 @@ vine = ">=5.0.0,<6.0.0" [[package]] name = "anyio" -version = "4.1.0" +version = "3.7.1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "anyio-4.1.0-py3-none-any.whl", hash = "sha256:56a415fbc462291813a94528a779597226619c8e78af7de0507333f700011e5f"}, - {file = "anyio-4.1.0.tar.gz", hash = "sha256:5a0bec7085176715be77df87fc66d6c9d70626bd752fcc85f57cdbee5b3760da"}, + {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, + {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, ] [package.dependencies] @@ -168,9 +59,9 @@ idna = ">=2.8" sniffio = 
">=1.1" [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] +test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (<0.22)"] [[package]] name = "argcomplete" @@ -378,32 +269,32 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.33.3" +version = "1.33.9" description = "The AWS SDK for Python" optional = false python-versions = ">= 3.7" files = [ - {file = "boto3-1.33.3-py3-none-any.whl", hash = "sha256:326b563021ed16470779df3bb372d55054f897bf2d72395bdf92693b3eeb4dd3"}, - {file = "boto3-1.33.3.tar.gz", hash = "sha256:8edc92b27a500728d55cf5d69d82ccece163491e274a7783c705010e58b1500f"}, + {file = "boto3-1.33.9-py3-none-any.whl", hash = "sha256:baa6ea61527bcc82365a4ef365aa8f34126483ff8533d01274f3cdb340c22d73"}, + {file = "boto3-1.33.9.tar.gz", hash = "sha256:9486f66f9a89f66d64d25cb4baa55aad323a1734ef9815b7a2c4c0787a3fc1fb"}, ] [package.dependencies] -botocore = ">=1.33.3,<1.34.0" +botocore = ">=1.33.9,<1.34.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.8.0,<0.9.0" +s3transfer = ">=0.8.2,<0.9.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.33.3" +version = "1.33.9" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">= 3.7" files = [ - {file = "botocore-1.33.3-py3-none-any.whl", hash = "sha256:4aed37802aaae325a5abab33de2d7e68acf637086876727a164ea38a1cc219f9"}, - {file = "botocore-1.33.3.tar.gz", hash = "sha256:462528fc8dc1953bc19841fd2ccee1626ec8f5b13d9e451e13452c71de2fe0dc"}, + {file = "botocore-1.33.9-py3-none-any.whl", hash = "sha256:07ff4eea62f546da540310887000c81fca802c788eb374fef06eda28009dcafd"}, + {file = "botocore-1.33.9.tar.gz", hash = "sha256:1b90be11dd9a9a25290c995a6b713e76b9e2bf7bf86ba9036e0d28f2ceb2edfc"}, ] [package.dependencies] @@ -519,13 +410,13 @@ files = [ [[package]] name = "cattrs" -version = "23.2.2" +version = "23.2.3" description = "Composable complex class support for attrs and dataclasses." 
optional = false python-versions = ">=3.8" files = [ - {file = "cattrs-23.2.2-py3-none-any.whl", hash = "sha256:66064e2060ea207c5a48d065ab1910c10bb8108c28f3df8d1a7b1aa6b19d191b"}, - {file = "cattrs-23.2.2.tar.gz", hash = "sha256:b790b1c2be1ce042611e33f740e343c2593918bbf3c1cc88cdddac4defc09655"}, + {file = "cattrs-23.2.3-py3-none-any.whl", hash = "sha256:0341994d94971052e9ee70662542699a3162ea1e0c62f7ce1b4a57f563685108"}, + {file = "cattrs-23.2.3.tar.gz", hash = "sha256:a934090d95abaa9e911dac357e3a8699e0b4b14f8529bcc7d2b1ad9d51672b9f"}, ] [package.dependencies] @@ -1128,15 +1019,26 @@ files = [ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, ] +[[package]] +name = "distro" +version = "1.8.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, + {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, +] + [[package]] name = "django" -version = "4.2.7" +version = "4.2.8" description = "A high-level Python web framework that encourages rapid development and clean, pragmatic design." optional = false python-versions = ">=3.8" files = [ - {file = "Django-4.2.7-py3-none-any.whl", hash = "sha256:e1d37c51ad26186de355cbcec16613ebdabfa9689bbade9c538835205a8abbe9"}, - {file = "Django-4.2.7.tar.gz", hash = "sha256:8e0f1c2c2786b5c0e39fe1afce24c926040fad47c8ea8ad30aaf1188df29fc41"}, + {file = "Django-4.2.8-py3-none-any.whl", hash = "sha256:6cb5dcea9e3d12c47834d32156b8841f533a4493c688e2718cafd51aa430ba6d"}, + {file = "Django-4.2.8.tar.gz", hash = "sha256:d69d5e36cc5d9f4eb4872be36c622878afcdce94062716cf3e25bcedcb168b62"}, ] [package.dependencies] @@ -1387,13 +1289,13 @@ hiredis = ["redis[hiredis] (>=3,!=4.0.0,!=4.0.1)"] [[package]] name = "django-ses" -version = "3.5.1" +version = "3.5.2" description = "A Django email backend for Amazon's Simple Email Service (SES)" optional = false python-versions = ">=3.8,<4.0" files = [ - {file = "django_ses-3.5.1-py3-none-any.whl", hash = "sha256:313b57b020a13333b6333c084809acfcbc80d5459b39905f5373af145f039de9"}, - {file = "django_ses-3.5.1.tar.gz", hash = "sha256:5d8d260f651d040b744fd82f04a5bcf49efc97f5307adfe44cd2e61ae8011ba2"}, + {file = "django_ses-3.5.2-py3-none-any.whl", hash = "sha256:90c68cc6ca3467893faa8499981c81ba8ff2bd3f3acb08c06423a4142d6a0fc6"}, + {file = "django_ses-3.5.2.tar.gz", hash = "sha256:b6d94689bc15de02a11e84f05a5bf4a7895688e570c6f07c21698094debc6ced"}, ] [package.dependencies] @@ -1453,13 +1355,13 @@ compatible-mypy = ["mypy (>=1.1.1,<1.2)"] [[package]] name = "django-stubs-ext" -version = "4.2.5" +version = "4.2.7" description = "Monkey-patching and extensions for django-stubs" optional = false python-versions = ">=3.8" files = [ - {file = "django-stubs-ext-4.2.5.tar.gz", hash = "sha256:8c4d1fb5f68419b3b2474c659681a189803e27d6a5e5abf5aa0da57601b58633"}, - {file = "django_stubs_ext-4.2.5-py3-none-any.whl", hash = "sha256:921cd7ae4614e74c234bc0fe86ee75537d163addfe1fc6f134bf03e29d86c01e"}, + {file = "django-stubs-ext-4.2.7.tar.gz", hash = "sha256:519342ac0849cda1559746c9a563f03ff99f636b0ebe7c14b75e816a00dfddc3"}, + {file = "django_stubs_ext-4.2.7-py3-none-any.whl", hash = "sha256:45a5d102417a412e3606e3c358adb4744988a92b7b58ccf3fd64bddd5d04d14c"}, ] [package.dependencies] @@ -1536,28 +1438,28 @@ files = [ [[package]] name = 
"djangorestframework-simplejwt" -version = "5.3.0" +version = "5.3.1" description = "A minimal JSON Web Token authentication plugin for Django REST Framework" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "djangorestframework_simplejwt-5.3.0-py3-none-any.whl", hash = "sha256:631d7ae2ed4365d7196a35d3cc0f6d382f7bd3361fb24c894f8f92b4da5db27d"}, - {file = "djangorestframework_simplejwt-5.3.0.tar.gz", hash = "sha256:8e4c5dfca8d11c0b8a66dfd8a4e3fc1c6aa7ea188d10907ff91c942f4b52ed66"}, + {file = "djangorestframework_simplejwt-5.3.1-py3-none-any.whl", hash = "sha256:381bc966aa46913905629d472cd72ad45faa265509764e20ffd440164c88d220"}, + {file = "djangorestframework_simplejwt-5.3.1.tar.gz", hash = "sha256:6c4bd37537440bc439564ebf7d6085e74c5411485197073f508ebdfa34bc9fae"}, ] [package.dependencies] cryptography = {version = ">=3.3.1", optional = true, markers = "extra == \"crypto\""} django = ">=3.2" -djangorestframework = ">=3.10" +djangorestframework = ">=3.12" pyjwt = ">=1.7.1,<3" [package.extras] crypto = ["cryptography (>=3.3.1)"] -dev = ["Sphinx (>=1.6.5,<2)", "cryptography", "flake8", "ipython", "isort", "pep8", "pytest", "pytest-cov", "pytest-django", "pytest-watch", "pytest-xdist", "python-jose (==3.3.0)", "sphinx-rtd-theme (>=0.1.9)", "tox", "twine", "wheel"] -doc = ["Sphinx (>=1.6.5,<2)", "sphinx-rtd-theme (>=0.1.9)"] +dev = ["Sphinx (>=1.6.5,<2)", "cryptography", "flake8", "freezegun", "ipython", "isort", "pep8", "pytest", "pytest-cov", "pytest-django", "pytest-watch", "pytest-xdist", "python-jose (==3.3.0)", "sphinx_rtd_theme (>=0.1.9)", "tox", "twine", "wheel"] +doc = ["Sphinx (>=1.6.5,<2)", "sphinx_rtd_theme (>=0.1.9)"] lint = ["flake8", "isort", "pep8"] python-jose = ["python-jose (==3.3.0)"] -test = ["cryptography", "pytest", "pytest-cov", "pytest-django", "pytest-xdist", "tox"] +test = ["cryptography", "freezegun", "pytest", "pytest-cov", "pytest-django", "pytest-xdist", "tox"] [[package]] name = "djangorestframework-stubs" @@ -1714,76 +1616,6 @@ files = [ {file = "fivetran-0.7.0.tar.gz", hash = "sha256:f397b3c7e97c07edd3f680aec4e644b5e99d733adff22b91368e48fb55336334"}, ] -[[package]] -name = "frozenlist" -version = "1.4.0" -description = "A list-like structure which implements collections.abc.MutableSequence" -optional = false -python-versions = ">=3.8" -files = [ - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, - {file = 
"frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, - {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, - {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, - {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, - {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, - {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, - {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, - {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, - {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, - {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, -] - [[package]] name = "furl" version = "2.1.3" @@ -1830,13 +1662,13 @@ dev = ["jsonref", "matplotlib"] [[package]] name = "google-api-core" -version = "2.14.0" +version = "2.15.0" description = "Google API client core library" optional = false python-versions = ">=3.7" files = [ - {file = "google-api-core-2.14.0.tar.gz", hash = "sha256:5368a4502b793d9bbf812a5912e13e4e69f9bd87f6efb508460c43f5bbd1ce41"}, - {file = "google_api_core-2.14.0-py3-none-any.whl", hash = "sha256:de2fb50ed34d47ddbb2bd2dcf680ee8fead46279f4ed6b16de362aca23a18952"}, + {file = "google-api-core-2.15.0.tar.gz", hash = "sha256:abc978a72658f14a2df1e5e12532effe40f94f868f6e23d95133bd6abcca35ca"}, + {file = "google_api_core-2.15.0-py3-none-any.whl", hash = "sha256:2aa56d2be495551e66bbff7f729b790546f87d5c90e74781aa77233bcb395a8a"}, ] [package.dependencies] @@ -1854,13 +1686,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-auth" -version = "2.23.4" +version = "2.25.1" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.23.4.tar.gz", hash = "sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3"}, - {file = "google_auth-2.23.4-py2.py3-none-any.whl", hash = "sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2"}, + {file = "google-auth-2.25.1.tar.gz", hash = "sha256:d5d66b8f4f6e3273740d7bb73ddefa6c2d1ff691704bd407d51c6b5800e7c97b"}, + {file = "google_auth-2.25.1-py2.py3-none-any.whl", hash = "sha256:dfd7b44935d498e106c08883b2dac0ad36d8aa10402a6412e9a1c9d74b4773f1"}, ] [package.dependencies] @@ -2816,13 +2648,13 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- [[package]] name = "jsonschema-specifications" -version = "2023.11.1" +version = "2023.11.2" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema_specifications-2023.11.1-py3-none-any.whl", hash = "sha256:f596778ab612b3fd29f72ea0d990393d0540a5aab18bf0407a46632eab540779"}, - {file = 
"jsonschema_specifications-2023.11.1.tar.gz", hash = "sha256:c9b234904ffe02f079bf91b14d79987faa685fd4b39c377a0996954c0090b9ca"}, + {file = "jsonschema_specifications-2023.11.2-py3-none-any.whl", hash = "sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93"}, + {file = "jsonschema_specifications-2023.11.2.tar.gz", hash = "sha256:9472fc4fea474cd74bea4a2b190daeccb5a9e4db2ea80efcf7a1b582fc9a81b8"}, ] [package.dependencies] @@ -3140,89 +2972,6 @@ files = [ {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"}, ] -[[package]] -name = "multidict" -version = "6.0.4" -description = "multidict implementation" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, - {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, - {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, - {file = 
"multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, - {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, - {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, - {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, - {file = 
"multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, - {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, - {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, - {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, - {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, - {file = 
"multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, - {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, - {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, - {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, -] - [[package]] name = "multimethod" version = "1.10" @@ -3416,25 +3165,26 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "openai" -version = "0.28.1" -description = "Python client library for the OpenAI API" +version = "1.3.7" +description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-0.28.1-py3-none-any.whl", hash = "sha256:d18690f9e3d31eedb66b57b88c2165d760b24ea0a01f150dd3f068155088ce68"}, - {file = "openai-0.28.1.tar.gz", hash = "sha256:4be1dad329a65b4ce1a660fe6d5431b438f429b5855c883435f0f7fcb6d2dcc8"}, + {file = "openai-1.3.7-py3-none-any.whl", hash = "sha256:e5c51367a910297e4d1cd33d2298fb87d7edf681edbe012873925ac16f95bee0"}, + {file = "openai-1.3.7.tar.gz", hash = "sha256:18074a0f51f9b49d1ae268c7abc36f7f33212a0c0d08ce11b7053ab2d17798de"}, ] [package.dependencies] -aiohttp = "*" -requests = ">=2.20" -tqdm = "*" +anyio = ">=3.5.0,<4" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.5,<5" [package.extras] -datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] -embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] -wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orderedmultidict" @@ -3618,13 +3368,13 @@ sqlalchemy = ["sqlalchemy (>=1.4.29)"] [[package]] name = "posthog" -version = "3.0.2" +version = "3.1.0" description = "Integrate PostHog into any python application." 
optional = false python-versions = "*" files = [ - {file = "posthog-3.0.2-py2.py3-none-any.whl", hash = "sha256:a8c0af6f2401fbe50f90e68c4143d0824b54e872de036b1c2f23b5abb39d88ce"}, - {file = "posthog-3.0.2.tar.gz", hash = "sha256:701fba6e446a4de687c6e861b587e7b7741955ad624bf34fe013c06a0fec6fb3"}, + {file = "posthog-3.1.0-py2.py3-none-any.whl", hash = "sha256:acd033530bdfc275dce5587f205f62378991ecb9b7cd5479e79c7f4ac575d319"}, + {file = "posthog-3.1.0.tar.gz", hash = "sha256:db17a2c511e18757aec12b6632ddcc1fa318743dad88a4666010467a3d9468da"}, ] [package.dependencies] @@ -3637,7 +3387,7 @@ six = ">=1.5" [package.extras] dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"] sentry = ["django", "sentry-sdk"] -test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest"] +test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest", "pytest-timeout"] [[package]] name = "prompt-toolkit" @@ -4269,13 +4019,13 @@ full = ["numpy", "pandas"] [[package]] name = "referencing" -version = "0.31.1" +version = "0.32.0" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" files = [ - {file = "referencing-0.31.1-py3-none-any.whl", hash = "sha256:c19c4d006f1757e3dd75c4f784d38f8698d87b649c54f9ace14e5e8c9667c01d"}, - {file = "referencing-0.31.1.tar.gz", hash = "sha256:81a1471c68c9d5e3831c30ad1dd9815c45b558e596653db751a2bfdd17b3b9ec"}, + {file = "referencing-0.32.0-py3-none-any.whl", hash = "sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99"}, + {file = "referencing-0.32.0.tar.gz", hash = "sha256:689e64fe121843dcfd57b71933318ef1f91188ffb45367332700a86ac8fd6161"}, ] [package.dependencies] @@ -4935,13 +4685,13 @@ test = ["pytest", "pytest-cov"] [[package]] name = "starlette" -version = "0.32.0.post1" +version = "0.33.0" description = "The little ASGI library that shines." 
optional = false python-versions = ">=3.8" files = [ - {file = "starlette-0.32.0.post1-py3-none-any.whl", hash = "sha256:cd0cb10ddb49313f609cedfac62c8c12e56c7314b66d89bb077ba228bada1b09"}, - {file = "starlette-0.32.0.post1.tar.gz", hash = "sha256:e54e2b7e2fb06dff9eac40133583f10dfa05913f5a85bf26f427c7a40a9a3d02"}, + {file = "starlette-0.33.0-py3-none-any.whl", hash = "sha256:6d492f0f7dfb2dd646ac7d80444af4989cd5c5c78f622fac99e39a66c3e01f06"}, + {file = "starlette-0.33.0.tar.gz", hash = "sha256:8c21f9592451b2016300c5bbc54b181063367b62720a4048656c070319238897"}, ] [package.dependencies] @@ -5008,40 +4758,47 @@ enum = ["django-choices-field (>=2.2.2)"] [[package]] name = "tiktoken" -version = "0.5.1" +version = "0.5.2" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" files = [ - {file = "tiktoken-0.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2b0bae3fd56de1c0a5874fb6577667a3c75bf231a6cef599338820210c16e40a"}, - {file = "tiktoken-0.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e529578d017045e2f0ed12d2e00e7e99f780f477234da4aae799ec4afca89f37"}, - {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edd2ffbb789712d83fee19ab009949f998a35c51ad9f9beb39109357416344ff"}, - {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c73d47bdc1a3f1f66ffa019af0386c48effdc6e8797e5e76875f6388ff72e9"}, - {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:46b8554b9f351561b1989157c6bb54462056f3d44e43aa4e671367c5d62535fc"}, - {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92ed3bbf71a175a6a4e5fbfcdb2c422bdd72d9b20407e00f435cf22a68b4ea9b"}, - {file = "tiktoken-0.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:714efb2f4a082635d9f5afe0bf7e62989b72b65ac52f004eb7ac939f506c03a4"}, - {file = "tiktoken-0.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a10488d1d1a5f9c9d2b2052fdb4cf807bba545818cb1ef724a7f5d44d9f7c3d4"}, - {file = "tiktoken-0.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8079ac065572fe0e7c696dbd63e1fdc12ce4cdca9933935d038689d4732451df"}, - {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ef730db4097f5b13df8d960f7fdda2744fe21d203ea2bb80c120bb58661b155"}, - {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:426e7def5f3f23645dada816be119fa61e587dfb4755de250e136b47a045c365"}, - {file = "tiktoken-0.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:323cec0031358bc09aa965c2c5c1f9f59baf76e5b17e62dcc06d1bb9bc3a3c7c"}, - {file = "tiktoken-0.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5abd9436f02e2c8eda5cce2ff8015ce91f33e782a7423de2a1859f772928f714"}, - {file = "tiktoken-0.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:1fe99953b63aabc0c9536fbc91c3c9000d78e4755edc28cc2e10825372046a2d"}, - {file = "tiktoken-0.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dcdc630461927718b317e6f8be7707bd0fc768cee1fdc78ddaa1e93f4dc6b2b1"}, - {file = "tiktoken-0.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1f2b3b253e22322b7f53a111e1f6d7ecfa199b4f08f3efdeb0480f4033b5cdc6"}, - {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43ce0199f315776dec3ea7bf86f35df86d24b6fcde1babd3e53c38f17352442f"}, - {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a84657c083d458593c0235926b5c993eec0b586a2508d6a2020556e5347c2f0d"}, - {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c008375c0f3d97c36e81725308699116cd5804fdac0f9b7afc732056329d2790"}, - {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:779c4dea5edd1d3178734d144d32231e0b814976bec1ec09636d1003ffe4725f"}, - {file = "tiktoken-0.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:b5dcfcf9bfb798e86fbce76d40a1d5d9e3f92131aecfa3d1e5c9ea1a20f1ef1a"}, - {file = "tiktoken-0.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b180a22db0bbcc447f691ffc3cf7a580e9e0587d87379e35e58b826ebf5bc7b"}, - {file = "tiktoken-0.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b756a65d98b7cf760617a6b68762a23ab8b6ef79922be5afdb00f5e8a9f4e76"}, - {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba9873c253ca1f670e662192a0afcb72b41e0ba3e730f16c665099e12f4dac2d"}, - {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74c90d2be0b4c1a2b3f7dde95cd976757817d4df080d6af0ee8d461568c2e2ad"}, - {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:709a5220891f2b56caad8327fab86281787704931ed484d9548f65598dea9ce4"}, - {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d5a187ff9c786fae6aadd49f47f019ff19e99071dc5b0fe91bfecc94d37c686"}, - {file = "tiktoken-0.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:e21840043dbe2e280e99ad41951c00eff8ee3b63daf57cd4c1508a3fd8583ea2"}, - {file = "tiktoken-0.5.1.tar.gz", hash = "sha256:27e773564232004f4f810fd1f85236673ec3a56ed7f1206fc9ed8670ebedb97a"}, + {file = "tiktoken-0.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c4e654282ef05ec1bd06ead22141a9a1687991cef2c6a81bdd1284301abc71d"}, + {file = "tiktoken-0.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b3134aa24319f42c27718c6967f3c1916a38a715a0fa73d33717ba121231307"}, + {file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6092e6e77730929c8c6a51bb0d7cfdf1b72b63c4d033d6258d1f2ee81052e9e5"}, + {file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ad8ae2a747622efae75837abba59be6c15a8f31b4ac3c6156bc56ec7a8e631"}, + {file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51cba7c8711afa0b885445f0637f0fcc366740798c40b981f08c5f984e02c9d1"}, + {file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3d8c7d2c9313f8e92e987d585ee2ba0f7c40a0de84f4805b093b634f792124f5"}, + {file = "tiktoken-0.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:692eca18c5fd8d1e0dde767f895c17686faaa102f37640e884eecb6854e7cca7"}, + {file = "tiktoken-0.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:138d173abbf1ec75863ad68ca289d4da30caa3245f3c8d4bfb274c4d629a2f77"}, + {file = "tiktoken-0.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7388fdd684690973fdc450b47dfd24d7f0cbe658f58a576169baef5ae4658607"}, + {file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a114391790113bcff670c70c24e166a841f7ea8f47ee2fe0e71e08b49d0bf2d4"}, + {file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca96f001e69f6859dd52926d950cfcc610480e920e576183497ab954e645e6ac"}, + {file = "tiktoken-0.5.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:15fed1dd88e30dfadcdd8e53a8927f04e1f6f81ad08a5ca824858a593ab476c7"}, + {file = 
"tiktoken-0.5.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:93f8e692db5756f7ea8cb0cfca34638316dcf0841fb8469de8ed7f6a015ba0b0"}, + {file = "tiktoken-0.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:bcae1c4c92df2ffc4fe9f475bf8148dbb0ee2404743168bbeb9dcc4b79dc1fdd"}, + {file = "tiktoken-0.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b76a1e17d4eb4357d00f0622d9a48ffbb23401dcf36f9716d9bd9c8e79d421aa"}, + {file = "tiktoken-0.5.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:01d8b171bb5df4035580bc26d4f5339a6fd58d06f069091899d4a798ea279d3e"}, + {file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42adf7d4fb1ed8de6e0ff2e794a6a15005f056a0d83d22d1d6755a39bffd9e7f"}, + {file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3f894dbe0adb44609f3d532b8ea10820d61fdcb288b325a458dfc60fefb7db"}, + {file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:58ccfddb4e62f0df974e8f7e34a667981d9bb553a811256e617731bf1d007d19"}, + {file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58902a8bad2de4268c2a701f1c844d22bfa3cbcc485b10e8e3e28a050179330b"}, + {file = "tiktoken-0.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:5e39257826d0647fcac403d8fa0a474b30d02ec8ffc012cfaf13083e9b5e82c5"}, + {file = "tiktoken-0.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bde3b0fbf09a23072d39c1ede0e0821f759b4fa254a5f00078909158e90ae1f"}, + {file = "tiktoken-0.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ddee082dcf1231ccf3a591d234935e6acf3e82ee28521fe99af9630bc8d2a60"}, + {file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35c057a6a4e777b5966a7540481a75a31429fc1cb4c9da87b71c8b75b5143037"}, + {file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c4a049b87e28f1dc60509f8eb7790bc8d11f9a70d99b9dd18dfdd81a084ffe6"}, + {file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5bf5ce759089f4f6521ea6ed89d8f988f7b396e9f4afb503b945f5c949c6bec2"}, + {file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0c964f554af1a96884e01188f480dad3fc224c4bbcf7af75d4b74c4b74ae0125"}, + {file = "tiktoken-0.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:368dd5726d2e8788e47ea04f32e20f72a2012a8a67af5b0b003d1e059f1d30a3"}, + {file = "tiktoken-0.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2deef9115b8cd55536c0a02c0203512f8deb2447f41585e6d929a0b878a0dd2"}, + {file = "tiktoken-0.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ed7d380195affbf886e2f8b92b14edfe13f4768ff5fc8de315adba5b773815e"}, + {file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c76fce01309c8140ffe15eb34ded2bb94789614b7d1d09e206838fc173776a18"}, + {file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60a5654d6a2e2d152637dd9a880b4482267dfc8a86ccf3ab1cec31a8c76bfae8"}, + {file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41d4d3228e051b779245a8ddd21d4336f8975563e92375662f42d05a19bdff41"}, + {file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c1cdec2c92fcde8c17a50814b525ae6a88e8e5b02030dc120b76e11db93f13"}, + {file = "tiktoken-0.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:84ddb36faedb448a50b246e13d1b6ee3437f60b7169b723a4b2abad75e914f3e"}, + {file = "tiktoken-0.5.2.tar.gz", hash = 
"sha256:f54c581f134a8ea96ce2023ab221d4d4d81ab614efa0b2fbce926387deb56c80"}, ] [package.dependencies] @@ -5600,123 +5357,6 @@ Brotli = {version = "*", optional = true, markers = "extra == \"brotli\""} [package.extras] brotli = ["Brotli"] -[[package]] -name = "wsproto" -version = "1.2.0" -description = "WebSockets state-machine based protocol implementation" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, - {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, -] - -[package.dependencies] -h11 = ">=0.9.0,<1" - -[[package]] -name = "yarl" -version = "1.9.3" -description = "Yet another URL library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "yarl-1.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:32435d134414e01d937cd9d6cc56e8413a8d4741dea36af5840c7750f04d16ab"}, - {file = "yarl-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9a5211de242754b5e612557bca701f39f8b1a9408dff73c6db623f22d20f470e"}, - {file = "yarl-1.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:525cd69eff44833b01f8ef39aa33a9cc53a99ff7f9d76a6ef6a9fb758f54d0ff"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc94441bcf9cb8c59f51f23193316afefbf3ff858460cb47b5758bf66a14d130"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e36021db54b8a0475805acc1d6c4bca5d9f52c3825ad29ae2d398a9d530ddb88"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0f17d1df951336a02afc8270c03c0c6e60d1f9996fcbd43a4ce6be81de0bd9d"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5f3faeb8100a43adf3e7925d556801d14b5816a0ac9e75e22948e787feec642"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aed37db837ecb5962469fad448aaae0f0ee94ffce2062cf2eb9aed13328b5196"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:721ee3fc292f0d069a04016ef2c3a25595d48c5b8ddc6029be46f6158d129c92"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b8bc5b87a65a4e64bc83385c05145ea901b613d0d3a434d434b55511b6ab0067"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:dd952b9c64f3b21aedd09b8fe958e4931864dba69926d8a90c90d36ac4e28c9a"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:c405d482c320a88ab53dcbd98d6d6f32ada074f2d965d6e9bf2d823158fa97de"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9df9a0d4c5624790a0dea2e02e3b1b3c69aed14bcb8650e19606d9df3719e87d"}, - {file = "yarl-1.9.3-cp310-cp310-win32.whl", hash = "sha256:d34c4f80956227f2686ddea5b3585e109c2733e2d4ef12eb1b8b4e84f09a2ab6"}, - {file = "yarl-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:cf7a4e8de7f1092829caef66fd90eaf3710bc5efd322a816d5677b7664893c93"}, - {file = "yarl-1.9.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d61a0ca95503867d4d627517bcfdc28a8468c3f1b0b06c626f30dd759d3999fd"}, - {file = "yarl-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:73cc83f918b69110813a7d95024266072d987b903a623ecae673d1e71579d566"}, - {file = "yarl-1.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:d81657b23e0edb84b37167e98aefb04ae16cbc5352770057893bd222cdc6e45f"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26a1a8443091c7fbc17b84a0d9f38de34b8423b459fb853e6c8cdfab0eacf613"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe34befb8c765b8ce562f0200afda3578f8abb159c76de3ab354c80b72244c41"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c757f64afe53a422e45e3e399e1e3cf82b7a2f244796ce80d8ca53e16a49b9f"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72a57b41a0920b9a220125081c1e191b88a4cdec13bf9d0649e382a822705c65"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:632c7aeb99df718765adf58eacb9acb9cbc555e075da849c1378ef4d18bf536a"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b0b8c06afcf2bac5a50b37f64efbde978b7f9dc88842ce9729c020dc71fae4ce"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1d93461e2cf76c4796355494f15ffcb50a3c198cc2d601ad8d6a96219a10c363"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:4003f380dac50328c85e85416aca6985536812c082387255c35292cb4b41707e"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4d6d74a97e898c1c2df80339aa423234ad9ea2052f66366cef1e80448798c13d"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b61e64b06c3640feab73fa4ff9cb64bd8182de52e5dc13038e01cfe674ebc321"}, - {file = "yarl-1.9.3-cp311-cp311-win32.whl", hash = "sha256:29beac86f33d6c7ab1d79bd0213aa7aed2d2f555386856bb3056d5fdd9dab279"}, - {file = "yarl-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:f7271d6bd8838c49ba8ae647fc06469137e1c161a7ef97d778b72904d9b68696"}, - {file = "yarl-1.9.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:dd318e6b75ca80bff0b22b302f83a8ee41c62b8ac662ddb49f67ec97e799885d"}, - {file = "yarl-1.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c4b1efb11a8acd13246ffb0bee888dd0e8eb057f8bf30112e3e21e421eb82d4a"}, - {file = "yarl-1.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c6f034386e5550b5dc8ded90b5e2ff7db21f0f5c7de37b6efc5dac046eb19c10"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd49a908cb6d387fc26acee8b7d9fcc9bbf8e1aca890c0b2fdfd706057546080"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa4643635f26052401750bd54db911b6342eb1a9ac3e74f0f8b58a25d61dfe41"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e741bd48e6a417bdfbae02e088f60018286d6c141639359fb8df017a3b69415a"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c86d0d0919952d05df880a1889a4f0aeb6868e98961c090e335671dea5c0361"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d5434b34100b504aabae75f0622ebb85defffe7b64ad8f52b8b30ec6ef6e4b9"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79e1df60f7c2b148722fb6cafebffe1acd95fd8b5fd77795f56247edaf326752"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:44e91a669c43f03964f672c5a234ae0d7a4d49c9b85d1baa93dec28afa28ffbd"}, - {file = 
"yarl-1.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3cfa4dbe17b2e6fca1414e9c3bcc216f6930cb18ea7646e7d0d52792ac196808"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:88d2c3cc4b2f46d1ba73d81c51ec0e486f59cc51165ea4f789677f91a303a9a7"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cccdc02e46d2bd7cb5f38f8cc3d9db0d24951abd082b2f242c9e9f59c0ab2af3"}, - {file = "yarl-1.9.3-cp312-cp312-win32.whl", hash = "sha256:96758e56dceb8a70f8a5cff1e452daaeff07d1cc9f11e9b0c951330f0a2396a7"}, - {file = "yarl-1.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:c4472fe53ebf541113e533971bd8c32728debc4c6d8cc177f2bff31d011ec17e"}, - {file = "yarl-1.9.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:126638ab961633f0940a06e1c9d59919003ef212a15869708dcb7305f91a6732"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c99ddaddb2fbe04953b84d1651149a0d85214780e4d0ee824e610ab549d98d92"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dab30b21bd6fb17c3f4684868c7e6a9e8468078db00f599fb1c14e324b10fca"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:828235a2a169160ee73a2fcfb8a000709edf09d7511fccf203465c3d5acc59e4"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc391e3941045fd0987c77484b2799adffd08e4b6735c4ee5f054366a2e1551d"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51382c72dd5377861b573bd55dcf680df54cea84147c8648b15ac507fbef984d"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:28a108cb92ce6cf867690a962372996ca332d8cda0210c5ad487fe996e76b8bb"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8f18a7832ff85dfcd77871fe677b169b1bc60c021978c90c3bb14f727596e0ae"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:7eaf13af79950142ab2bbb8362f8d8d935be9aaf8df1df89c86c3231e4ff238a"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:66a6dbf6ca7d2db03cc61cafe1ee6be838ce0fbc97781881a22a58a7c5efef42"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1a0a4f3aaa18580038cfa52a7183c8ffbbe7d727fe581300817efc1e96d1b0e9"}, - {file = "yarl-1.9.3-cp37-cp37m-win32.whl", hash = "sha256:946db4511b2d815979d733ac6a961f47e20a29c297be0d55b6d4b77ee4b298f6"}, - {file = "yarl-1.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2dad8166d41ebd1f76ce107cf6a31e39801aee3844a54a90af23278b072f1ccf"}, - {file = "yarl-1.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bb72d2a94481e7dc7a0c522673db288f31849800d6ce2435317376a345728225"}, - {file = "yarl-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9a172c3d5447b7da1680a1a2d6ecdf6f87a319d21d52729f45ec938a7006d5d8"}, - {file = "yarl-1.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2dc72e891672343b99db6d497024bf8b985537ad6c393359dc5227ef653b2f17"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8d51817cf4b8d545963ec65ff06c1b92e5765aa98831678d0e2240b6e9fd281"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53ec65f7eee8655bebb1f6f1607760d123c3c115a324b443df4f916383482a67"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:cfd77e8e5cafba3fb584e0f4b935a59216f352b73d4987be3af51f43a862c403"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e73db54c967eb75037c178a54445c5a4e7461b5203b27c45ef656a81787c0c1b"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09c19e5f4404574fcfb736efecf75844ffe8610606f3fccc35a1515b8b6712c4"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6280353940f7e5e2efaaabd686193e61351e966cc02f401761c4d87f48c89ea4"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c25ec06e4241e162f5d1f57c370f4078797ade95c9208bd0c60f484834f09c96"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7217234b10c64b52cc39a8d82550342ae2e45be34f5bff02b890b8c452eb48d7"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4ce77d289f8d40905c054b63f29851ecbfd026ef4ba5c371a158cfe6f623663e"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5f74b015c99a5eac5ae589de27a1201418a5d9d460e89ccb3366015c6153e60a"}, - {file = "yarl-1.9.3-cp38-cp38-win32.whl", hash = "sha256:8a2538806be846ea25e90c28786136932ec385c7ff3bc1148e45125984783dc6"}, - {file = "yarl-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:6465d36381af057d0fab4e0f24ef0e80ba61f03fe43e6eeccbe0056e74aadc70"}, - {file = "yarl-1.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2f3c8822bc8fb4a347a192dd6a28a25d7f0ea3262e826d7d4ef9cc99cd06d07e"}, - {file = "yarl-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7831566595fe88ba17ea80e4b61c0eb599f84c85acaa14bf04dd90319a45b90"}, - {file = "yarl-1.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff34cb09a332832d1cf38acd0f604c068665192c6107a439a92abfd8acf90fe2"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe8080b4f25dfc44a86bedd14bc4f9d469dfc6456e6f3c5d9077e81a5fedfba7"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8535e111a064f3bdd94c0ed443105934d6f005adad68dd13ce50a488a0ad1bf3"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d155a092bf0ebf4a9f6f3b7a650dc5d9a5bbb585ef83a52ed36ba46f55cc39d"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:778df71c8d0c8c9f1b378624b26431ca80041660d7be7c3f724b2c7a6e65d0d6"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9f9cafaf031c34d95c1528c16b2fa07b710e6056b3c4e2e34e9317072da5d1a"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ca6b66f69e30f6e180d52f14d91ac854b8119553b524e0e28d5291a724f0f423"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e0e7e83f31e23c5d00ff618045ddc5e916f9e613d33c5a5823bc0b0a0feb522f"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:af52725c7c39b0ee655befbbab5b9a1b209e01bb39128dce0db226a10014aacc"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0ab5baaea8450f4a3e241ef17e3d129b2143e38a685036b075976b9c415ea3eb"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d350388ba1129bc867c6af1cd17da2b197dff0d2801036d2d7d83c2d771a682"}, - {file = "yarl-1.9.3-cp39-cp39-win32.whl", hash = "sha256:e2a16ef5fa2382af83bef4a18c1b3bcb4284c4732906aa69422cf09df9c59f1f"}, - {file = 
"yarl-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:d92d897cb4b4bf915fbeb5e604c7911021a8456f0964f3b8ebbe7f9188b9eabb"}, - {file = "yarl-1.9.3-py3-none-any.whl", hash = "sha256:271d63396460b6607b588555ea27a1a02b717ca2e3f2cf53bdde4013d7790929"}, - {file = "yarl-1.9.3.tar.gz", hash = "sha256:4a14907b597ec55740f63e52d7fee0e9ee09d5b9d57a4f399a7423268e457b57"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - [[package]] name = "zope-interface" version = "6.1" @@ -5773,4 +5413,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] [metadata] lock-version = "2.0" python-versions = "^3.11" -content-hash = "dc1c5624edbd27a3f5aac987db1eb748b1a35aa91aba80a19b7ae364d1416460" +content-hash = "d49fb4d165014ae58b7b9dc08a7f9765b843914a43b1089eac691c80096c3399" diff --git a/grai-server/app/pyproject.toml b/grai-server/app/pyproject.toml index 041da81c3..eb735011c 100644 --- a/grai-server/app/pyproject.toml +++ b/grai-server/app/pyproject.toml @@ -71,15 +71,13 @@ gunicorn = "^21.2.0" strawberry-graphql = {extras = ["channels", "asgi"], version = "^0.209.7"} asgi-cors-strawberry = "^0.2.0" overrides = "^7.4.0" -wsproto = "^1.2.0" -openai = "^0.28.1" +openai = "^1.3.7" tiktoken = "^0.5.1" pgvector = "^0.2.3" pyjwt = "^2.8.0" django-csp = "^3.7" - [tool.poetry.group.dev.dependencies] isort = "^5.10.1" black = "^23.7.0" diff --git a/grai-server/app/the_guide/settings/base.py b/grai-server/app/the_guide/settings/base.py index 009b2a833..373360a1c 100755 --- a/grai-server/app/the_guide/settings/base.py +++ b/grai-server/app/the_guide/settings/base.py @@ -372,15 +372,16 @@ def inner(value: str | bool) -> bool: OPENAI_API_KEY = config("OPENAI_API_KEY", None) OPENAI_ORG_ID = config("OPENAI_ORG_ID", None) -OPENAI_PREFERRED_MODEL = config("OPENAI_PREFERRED_MODEL", "gpt-3.5-turbo") +OPENAI_PREFERRED_MODEL = config("OPENAI_PREFERRED_MODEL", default="", cast=lambda x: "gpt-3.5-turbo" if x == "" else x) openai.organization = OPENAI_ORG_ID openai.api_key = OPENAI_API_KEY if OPENAI_API_KEY is not None and OPENAI_ORG_ID is not None: try: - models = [item["id"] for item in openai.Model.list()["data"]] - except openai.error.AuthenticationError as e: + client = openai.Client(api_key=OPENAI_API_KEY, organization=OPENAI_ORG_ID) + models = [item.id for item in client.models.list().data] + except openai.AuthenticationError as e: warnings.warn("Could not authenticate with OpenAI API key and organization id.") HAS_OPENAI = False else: diff --git a/grai-server/app/workspaces/management/commands/create_sample_data.py b/grai-server/app/workspaces/management/commands/create_sample_data.py index b37eaeff3..468606a35 100644 --- a/grai-server/app/workspaces/management/commands/create_sample_data.py +++ b/grai-server/app/workspaces/management/commands/create_sample_data.py @@ -4,13 +4,15 @@ from workspaces.models import Workspace from workspaces.sample_data import SampleData +from decouple import config class Command(BaseCommand): help = "Create sample data" def handle(self, *args, **options): - workspace = Workspace.objects.get(name="default") + name = config("DJANGO_SUPERUSER_WORKSPACE", "default") + workspace = Workspace.objects.get(name=name) generator = SampleData(workspace) async_to_sync(generator.generate)() self.stdout.write(self.style.SUCCESS("Successfully created sample data")) diff --git a/grai-server/app/workspaces/migrations/0009_alter_workspace_ai_enabled.py b/grai-server/app/workspaces/migrations/0009_alter_workspace_ai_enabled.py new file mode 100644 index 000000000..aeba28e1d --- /dev/null 
+++ b/grai-server/app/workspaces/migrations/0009_alter_workspace_ai_enabled.py @@ -0,0 +1,28 @@ +# Generated by Django 4.2.7 on 2023-12-01 17:14 + +from django.db import migrations, models +from workspaces.models import Workspace + + +def disable_default_ai_enabled(apps, schema_editor): + # Require users to explicitly enable AI components + workspaces = Workspace.objects.all() + for workspace in workspaces: + workspace.ai_enabled = False + + Workspace.objects.bulk_update(workspaces, ["ai_enabled"]) + + +class Migration(migrations.Migration): + dependencies = [ + ("workspaces", "0008_workspace_sample_data"), + ] + + operations = [ + migrations.AlterField( + model_name="workspace", + name="ai_enabled", + field=models.BooleanField(default=False), + ), + migrations.RunPython(disable_default_ai_enabled, migrations.RunPython.noop), + ] diff --git a/grai-server/app/workspaces/models.py b/grai-server/app/workspaces/models.py index c5ada486f..d19b8a271 100755 --- a/grai-server/app/workspaces/models.py +++ b/grai-server/app/workspaces/models.py @@ -33,7 +33,7 @@ class Workspace(TenantModel): ) sample_data = models.BooleanField(default=False) search_enabled = models.BooleanField(default=True) - ai_enabled = models.BooleanField(default=True) + ai_enabled = models.BooleanField(default=False) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) @@ -45,6 +45,27 @@ def __str__(self): def ref(self): return f"{self.organisation.name}/{self.name}" + def update_embeddings(self): + from lineage.models import Node + from lineage.tasks import update_node_vector_index + + if not self.ai_enabled: + return + + nodes_without_embeddings: list[uuid.UUID] = Node.objects.filter( + workspace_id=self.id, nodeembeddings__isnull=True + ).values_list("id", flat=True) + + for node_id in nodes_without_embeddings: + update_node_vector_index.delay(node_id) + + def save(self, *args, **kwargs): + current_state = Workspace.objects.filter(id=self.id).first() + super().save(*args, **kwargs) + + if current_state is None or current_state.ai_enabled != self.ai_enabled: + self.update_embeddings() + class TenantMeta: tenant_field_name = "id" diff --git a/grai-server/docker-compose.yml b/grai-server/docker-compose.yml index 25dabb13c..e73943e58 100755 --- a/grai-server/docker-compose.yml +++ b/grai-server/docker-compose.yml @@ -18,6 +18,7 @@ services: - DJANGO_SUPERUSER_PASSWORD=super_secret - OPENAI_API_KEY=${OPENAI_API_KEY} - OPENAI_ORG_ID=${OPENAI_ORG_ID} + - OPENAI_PREFERRED_MODEL=${OPENAI_PREFERRED_MODEL} depends_on: - redis - db @@ -40,6 +41,7 @@ services: - DJANGO_SETTINGS_MODULE=the_guide.settings.dev - OPENAI_API_KEY=${OPENAI_API_KEY} - OPENAI_ORG_ID=${OPENAI_ORG_ID} + - OPENAI_PREFERRED_MODEL=${OPENAI_PREFERRED_MODEL} depends_on: - redis - db @@ -57,6 +59,7 @@ services: - DJANGO_SETTINGS_MODULE=the_guide.settings.dev - OPENAI_API_KEY=${OPENAI_API_KEY} - OPENAI_ORG_ID=${OPENAI_ORG_ID} + - OPENAI_PREFERRED_MODEL=${OPENAI_PREFERRED_MODEL} depends_on: - redis - db @@ -71,7 +74,7 @@ services: - the_guide db: - image: ankane/pgvector:latest + image: ankane/pgvector shm_size: 1g environment: - POSTGRES_USER=grai diff --git a/grai-server/examples/docker-compose/docker-compose.yml b/grai-server/examples/docker-compose/docker-compose.yml index 20d0e6674..2477483f4 100755 --- a/grai-server/examples/docker-compose/docker-compose.yml +++ b/grai-server/examples/docker-compose/docker-compose.yml @@ -27,7 +27,7 @@ services: retries: 10 start_period: 20s db: - image: postgres:14.3-alpine + image: 
ankane/pgvector environment: - POSTGRES_USER=grai - POSTGRES_PASSWORD=grai diff --git a/poetry.lock b/poetry.lock index a55727d60..c7a045207 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand. [[package]] name = "black" @@ -116,13 +116,13 @@ pyflakes = ">=2.5.0,<2.6.0" [[package]] name = "identify" -version = "2.5.31" +version = "2.5.33" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.5.31-py2.py3-none-any.whl", hash = "sha256:90199cb9e7bd3c5407a9b7e81b4abec4bb9d249991c79439ec8af740afc6293d"}, - {file = "identify-2.5.31.tar.gz", hash = "sha256:7736b3c7a28233637e3c36550646fc6389bedd74ae84cb788200cc8e2dd60b75"}, + {file = "identify-2.5.33-py2.py3-none-any.whl", hash = "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34"}, + {file = "identify-2.5.33.tar.gz", hash = "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d"}, ] [package.extras] @@ -194,13 +194,13 @@ files = [ [[package]] name = "platformdirs" -version = "3.11.0" +version = "4.1.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"}, - {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"}, + {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, + {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, ] [package.extras] @@ -308,17 +308,17 @@ files = [ [[package]] name = "setuptools" -version = "68.2.2" +version = "69.0.2" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-68.2.2-py3-none-any.whl", hash = "sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a"}, - {file = "setuptools-68.2.2.tar.gz", hash = "sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87"}, + {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, + {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip 
(>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] @@ -346,19 +346,19 @@ files = [ [[package]] name = "virtualenv" -version = "20.24.6" +version = "20.25.0" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.24.6-py3-none-any.whl", hash = "sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381"}, - {file = "virtualenv-20.24.6.tar.gz", hash = "sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af"}, + {file = "virtualenv-20.25.0-py3-none-any.whl", hash = "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3"}, + {file = "virtualenv-20.25.0.tar.gz", hash = "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b"}, ] [package.dependencies] distlib = ">=0.3.7,<1" filelock = ">=3.12.2,<4" -platformdirs = ">=3.9.1,<4" +platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]