Skip to content

Commit

Permalink
- Fixes due to PR comments
Browse files Browse the repository at this point in the history
- add retries on "event loop is closed" error
  • Loading branch information
roman-romanov-o committed Dec 30, 2024
1 parent 9c82ca3 commit dfac827
Show file tree
Hide file tree
Showing 9 changed files with 163 additions and 88 deletions.
28 changes: 15 additions & 13 deletions aidial_adapter_openai/app_config.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import json
import os
from typing import Dict, List
from typing import Callable, Dict, List

from pydantic import BaseModel

Expand All @@ -25,20 +25,22 @@ class ApplicationConfig(BaseModel):
NON_STREAMING_DEPLOYMENTS: List[str] = []
ELIMINATE_EMPTY_CHOICES: bool = False

DEPLOYMENT_TYPE_MAP = {
ChatCompletionDeploymentType.DALLE3: "DALLE3_DEPLOYMENTS",
ChatCompletionDeploymentType.GPT4_VISION: "GPT4_VISION_DEPLOYMENTS",
ChatCompletionDeploymentType.MISTRAL: "MISTRAL_DEPLOYMENTS",
ChatCompletionDeploymentType.DATABRICKS: "DATABRICKS_DEPLOYMENTS",
ChatCompletionDeploymentType.GPT4O: "GPT4O_DEPLOYMENTS",
ChatCompletionDeploymentType.GPT4O_MINI: "GPT4O_MINI_DEPLOYMENTS",
DEPLOYMENT_TYPE_MAP: Dict[
ChatCompletionDeploymentType, Callable[["ApplicationConfig"], List[str]]
] = {
ChatCompletionDeploymentType.DALLE3: lambda config: config.DALLE3_DEPLOYMENTS,
ChatCompletionDeploymentType.GPT4_VISION: lambda config: config.GPT4_VISION_DEPLOYMENTS,
ChatCompletionDeploymentType.MISTRAL: lambda config: config.MISTRAL_DEPLOYMENTS,
ChatCompletionDeploymentType.DATABRICKS: lambda config: config.DATABRICKS_DEPLOYMENTS,
ChatCompletionDeploymentType.GPT4O: lambda config: config.GPT4O_DEPLOYMENTS,
ChatCompletionDeploymentType.GPT4O_MINI: lambda config: config.GPT4O_MINI_DEPLOYMENTS,
}

def get_chat_completion_deployment_type(
self, deployment_id: str
) -> ChatCompletionDeploymentType:
for deployment_type, attr_name in self.DEPLOYMENT_TYPE_MAP.items():
if deployment_id in getattr(self, attr_name):
for deployment_type, config_getter in self.DEPLOYMENT_TYPE_MAP.items():
if deployment_id in config_getter(self):
return deployment_type
return ChatCompletionDeploymentType.GPT_TEXT_ONLY

Expand All @@ -47,9 +49,9 @@ def add_deployment(
):
if deployment_type == ChatCompletionDeploymentType.GPT_TEXT_ONLY:
return
attr_name = self.DEPLOYMENT_TYPE_MAP[deployment_type]
assert attr_name is not None
getattr(self, attr_name).append(deployment_id)
config_getter = self.DEPLOYMENT_TYPE_MAP[deployment_type]
assert config_getter is not None
config_getter(self).append(deployment_id)

@classmethod
def from_env(cls) -> "ApplicationConfig":
Expand Down
6 changes: 6 additions & 0 deletions aidial_adapter_openai/utils/pydantic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
from pydantic import BaseModel


class ExtraAllowedModel(BaseModel):
    """Base pydantic model that accepts (and retains) undeclared extra fields."""

    # Pydantic v1-style inner config: extra = "allow" stores unknown input
    # keys on the instance instead of raising a validation error, so config
    # files may carry fields this adapter does not model explicitly.
    class Config:
        extra = "allow"
7 changes: 5 additions & 2 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from openai import AsyncAzureOpenAI

from aidial_adapter_openai.app import create_app
from aidial_adapter_openai.app_config import ApplicationConfig
from aidial_adapter_openai.utils.http_client import DEFAULT_TIMEOUT
from aidial_adapter_openai.utils.request import get_app_config
from tests.integration_tests.base import DeploymentConfig, TestDeployments
Expand All @@ -17,14 +18,16 @@
)


@pytest.fixture
@pytest.fixture(scope="session")
def _app_instance():
try:
deployments_config = TestDeployments.from_config(
TEST_DEPLOYMENTS_CONFIG_PATH
)
except FileNotFoundError:
deployments_config = TestDeployments(deployments=[])
deployments_config = TestDeployments(
deployments=[], app_config=ApplicationConfig()
)

return create_app(
init_telemetry=False,
Expand Down
73 changes: 59 additions & 14 deletions tests/integration_tests/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import json
import re
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, Iterator, List
from typing import Any, Callable, Dict, Iterator, List, Self

from openai import NOT_GIVEN, NotGiven
from openai.types.chat import (
Expand All @@ -14,10 +14,33 @@

from aidial_adapter_openai.app_config import ApplicationConfig
from aidial_adapter_openai.constant import ChatCompletionDeploymentType
from aidial_adapter_openai.utils.pydantic import ExtraAllowedModel
from tests.utils.openai import ChatCompletionResult, ExpectedException


class UpstreamConfig(ExtraAllowedModel):
    """One upstream entry of a model in the core config (extra keys kept)."""

    # Upstream chat-completions endpoint URL.
    endpoint: str
    # API key used to authenticate against that endpoint.
    key: str


class ModelConfig(ExtraAllowedModel):
    """Per-model section of the core config: the model's upstream list."""

    # All upstreams configured for this model; order is preserved from JSON.
    upstreams: List[UpstreamConfig]


class CoreConfig(ExtraAllowedModel):
    """Parsed core config: mapping of model deployment name -> model settings."""

    models: Dict[str, ModelConfig]

    @classmethod
    def from_config(cls, config_path: str) -> "CoreConfig":
        """Load and validate a core config from a JSON file.

        Raises FileNotFoundError when the file is missing and a pydantic
        validation error when the JSON does not match the schema.
        """
        # Explicit encoding so parsing does not depend on the locale default;
        # `with` guarantees the handle is closed even if json.load raises.
        with open(config_path, "r", encoding="utf-8") as f:
            test_config = json.load(f)

        return cls(**test_config)


class DeploymentConfig(BaseModel):
test_deployment_id: str

deployment_id: str
deployment_type: ChatCompletionDeploymentType
upstream_endpoint: str
Expand All @@ -30,29 +53,51 @@ def upstream_headers(self) -> Dict[str, str]:
"X-UPSTREAM-ENDPOINT": self.upstream_endpoint,
}

@classmethod
def create_deployments(
cls, core_config: CoreConfig, app_config: ApplicationConfig
) -> List[Self]:
configs = []
for model_name, model_config in core_config.models.items():
deployment_type = app_config.get_chat_completion_deployment_type(
model_name
)
for upstream_index, upstream_config in enumerate(
model_config.upstreams
):
test_deployment_id = f"{deployment_type.value}_{model_name}"
if len(model_config.upstreams) > 1:
test_deployment_id += f"_{upstream_index}"
configs.append(
cls(
test_deployment_id=test_deployment_id,
deployment_id=model_name,
deployment_type=deployment_type,
upstream_endpoint=upstream_config.endpoint,
upstream_api_key=upstream_config.key,
)
)
return configs


class TestDeployments(BaseModel):
__test__ = False
deployments: list[DeploymentConfig]
app_config: ApplicationConfig

@classmethod
def from_config(cls, config_path: str):
with open(config_path, "r") as f:
test_configs = json.load(f)
app_config = ApplicationConfig.from_env()

core_config = CoreConfig.from_config(config_path)

return cls(
deployments=[DeploymentConfig(**config) for config in test_configs]
app_config=app_config,
deployments=DeploymentConfig.create_deployments(
core_config, app_config
),
)

@property
def app_config(self) -> ApplicationConfig:
config = ApplicationConfig()
for deployment in self.deployments:
config.add_deployment(
deployment.deployment_id, deployment.deployment_type
)
return config


def sanitize_id_part(value: Any) -> str:
"""Convert any value to a pytest-safe identifier part."""
Expand Down Expand Up @@ -93,7 +138,7 @@ class TestCase:
def get_id(self):
parts = [
sanitize_id_part(self.name),
f"{sanitize_id_part(self.deployment_config.deployment_id)}",
f"{sanitize_id_part(self.deployment_config.test_deployment_id)}",
f"stream:{sanitize_id_part(self.streaming)}",
]

Expand Down
2 changes: 1 addition & 1 deletion tests/integration_tests/chat_completion_suites/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ def supports_parallel_tool_calls(deployment_type: ChatCompletionDeploymentType):

def supports_functions(deployment_type: ChatCompletionDeploymentType):
return deployment_type not in [
ChatCompletionDeploymentType.MISTRAL,
ChatCompletionDeploymentType.DATABRICKS,
]

Expand All @@ -50,6 +49,7 @@ def supports_functions(deployment_type: ChatCompletionDeploymentType):
[
ChatCompletionDeploymentType.GPT4_VISION,
ChatCompletionDeploymentType.DALLE3,
ChatCompletionDeploymentType.MISTRAL,
]
)
def tools_common(s: TestSuite) -> None:
Expand Down
88 changes: 51 additions & 37 deletions tests/integration_tests/integration_test_config_example.json
Original file line number Diff line number Diff line change
@@ -1,38 +1,52 @@
[
{
"deployment_id": "gpt-4o-2024-05-13",
"deployment_type": "GPT4O",
"upstream_endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4o-2024-05-13/chat/completions",
"upstream_api_key": "your-api-key"
},
{
"deployment_id": "gpt-4o-mini-2024-07-18",
"deployment_type": "GPT4O_MINI",
"upstream_endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini-2024-07-18/chat/completions",
"upstream_api_key": "your-api-key"
},
{
"deployment_id": "gpt-4-vision-preview",
"deployment_type": "GPT4_VISION",
"upstream_endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4-vision-preview/chat/completions",
"upstream_api_key": "your-api-key"
},
{
"deployment_id": "databricks-endpoint",
"deployment_type": "DATABRICKS",
"upstream_endpoint": "https://your-databricks-endpoint.cloud.databricks.com/serving-endpoints/chat/completions",
"upstream_api_key": "your-databricks-api-key"
},
{
"deployment_id": "mistral-endpoint",
"deployment_type": "MISTRAL",
"upstream_endpoint": "https://your-databricks-endpoint.cloud.databricks.com/serving-endpoints/chat/completions",
"upstream_api_key": "your-databricks-api-key"
},
{
"deployment_id": "gpt-4-0613",
"deployment_type": "GPT_TEXT_ONLY",
"upstream_endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4-0613/chat/completions",
"upstream_api_key": "your-api-key"
{
"models": {
"gpt-4o-2024-05-13": {
"upstreams": [
{
"endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4o-2024-05-13/chat/completions",
"key": "your-api-key"
}
]
},
"gpt-4o-mini-2024-07-18": {
"upstreams": [
{
"endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini-2024-07-18/chat/completions",
"key": "your-api-key"
}
]
},
"gpt-4-vision-preview": {
"upstreams": [
{
"endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4-vision-preview/chat/completions",
"key": "your-api-key"
}
]
},
"databricks-endpoint": {
"upstreams": [
{
"endpoint": "https://your-databricks-endpoint.cloud.databricks.com/serving-endpoints/chat/completions",
"key": "your-databricks-api-key"
}
]
},
"mistral-endpoint": {
"upstreams": [
{
"endpoint": "https://your-databricks-endpoint.cloud.databricks.com/serving-endpoints/chat/completions",
"key": "your-databricks-api-key"
}
]
},
"gpt-4-0613": {
"upstreams": [
{
"endpoint": "https://your-azure-endpoint.openai.azure.com/openai/deployments/gpt-4-0613/chat/completions",
"key": "your-api-key"
}
]
}
}
]
}
38 changes: 26 additions & 12 deletions tests/integration_tests/test_chat_completion.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
import logging
import re
from typing import List

import pytest
from openai import APIError

from tests.conftest import TEST_DEPLOYMENTS_CONFIG_PATH
from tests.integration_tests.base import (
Expand All @@ -23,6 +25,8 @@
chat_completion,
)

logger = logging.getLogger(__name__)


def create_test_cases(
test_case_builder: TestSuiteBuilder,
Expand Down Expand Up @@ -59,18 +63,28 @@ async def test_chat_completion(
client = get_openai_client(test_case.deployment_config)

async def run_chat_completion() -> ChatCompletionResult:
return await chat_completion(
client,
test_case.deployment_config.deployment_id,
test_case.messages,
test_case.streaming,
test_case.stop,
test_case.max_tokens,
test_case.n,
test_case.functions,
test_case.tools,
test_case.temperature,
)
for _ in range(3):
try:
return await chat_completion(
client,
test_case.deployment_config.deployment_id,
test_case.messages,
test_case.streaming,
test_case.stop,
test_case.max_tokens,
test_case.n,
test_case.functions,
test_case.tools,
test_case.temperature,
)
except APIError as e:
# Somehow, randomly throughout the tests, the event loop closes
if e.message == "Event loop is closed":
logger.warning("Event loop is closed, retrying...")
continue
else:
raise e
raise Exception("Event loop retries has failed!")

if isinstance(test_case.expected, ExpectedException):
with pytest.raises(Exception) as exc_info:
Expand Down
Loading

0 comments on commit dfac827

Please sign in to comment.