Merge pull request #92 from log10-io/mjin/add-pytest
Add initial pytest files for server response and load testing
nqn committed Jan 31, 2024
2 parents 0a76811 + 4766e16 commit c43c48b
Showing 3 changed files with 162 additions and 3 deletions.
87 changes: 84 additions & 3 deletions poetry.lock

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions pyproject.toml
@@ -21,6 +21,9 @@ packages = [

[tool.poetry.group.dev.dependencies]
build = "^0.10.0"
pytest = "^8.0.0"
requests-mock = "^1.11.0"
respx = "^0.20.2"

[project.urls]
"Homepage" = "https://github.com/log10-io/log10"
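
Note (not part of this commit): the three new dev dependencies cover the test runner (pytest) plus HTTP mocking for the two client libraries the new tests exercise: requests-mock patches `requests`, and respx patches `httpx`. A minimal sketch of both mocking styles, assuming the dependencies are installed:

import httpx
import requests
import requests_mock
import respx

# requests-mock intercepts calls made through `requests` inside the context manager.
with requests_mock.Mocker() as m:
    m.post("https://log10.io/api/completions", status_code=500)
    assert requests.post("https://log10.io/api/completions").status_code == 500

# respx does the same for httpx; used here as a context manager rather than the
# pytest fixture that tests/test_requests.py below relies on.
with respx.mock:
    respx.post("https://api.openai.com/v1/chat/completions").mock(
        return_value=httpx.Response(200, json={"role": "assistant", "content": "pong"})
    )
    assert httpx.post("https://api.openai.com/v1/chat/completions").status_code == 200
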
75 changes: 75 additions & 0 deletions tests/test_requests.py
@@ -0,0 +1,75 @@
import asyncio
import uuid

import httpx
import pytest
import requests_mock

from log10.load import log_sync, log_async, OpenAI, log10_session
from log10.llm import LLM, Log10Config


def test_log_sync_500():
    payload = {'abc': '123'}
    url = 'https://log10.io/api/completions'

    with requests_mock.Mocker() as m:
        m.post(url, status_code=500)
        log_sync(url, payload)


@pytest.mark.asyncio
async def test_log_async_500():
    payload = {'abc': '123'}
    url = 'https://log10.io/api/completions'

    with requests_mock.Mocker() as m:
        m.post(url, status_code=500)
        await log_async(url, payload)


@pytest.mark.skip(reason="This is a very simple load test and doesn't need to be run as part of the test suite.")
@pytest.mark.asyncio
async def test_log_async_multiple_calls():
    # Fire many logging calls concurrently (via the default thread-pool executor)
    # to exercise LLM.log_start/log_end under load.
    simultaneous_calls = 100
    url = 'https://log10.io/api/completions'

    mock_resp = {
        "role": "user",
        "content": "Say this is a test",
    }

    log10_config = Log10Config()
    loop = asyncio.get_event_loop()

    def fake_logging():
        llm = LLM(log10_config=log10_config)
        completion_id = llm.log_start(url, kind="chat")
        print(completion_id)
        llm.log_end(completion_id=completion_id, response=mock_resp, duration=5)

    await asyncio.gather(*[loop.run_in_executor(None, fake_logging) for _ in range(simultaneous_calls)])


@pytest.mark.skip(reason="This is a very simple load test and doesn't need to be run as part of the test suite.")
@pytest.mark.asyncio
async def test_log_async_httpx_multiple_calls_with_tags(respx_mock):
    # Same load pattern, but driving the OpenAI client imported from log10.load
    # against a mocked httpx response, tagging each session with fresh UUIDs.
    simultaneous_calls = 100

    mock_resp = {
        "role": "user",
        "content": "Say this is a test",
    }

    client = OpenAI()

    respx_mock.post("https://api.openai.com/v1/chat/completions").mock(return_value=httpx.Response(200, json=mock_resp))

    def better_logging():
        uuids = [str(uuid.uuid4()) for _ in range(5)]
        with log10_session(tags=uuids) as s:
            completion = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Say pong"}],
            )

    loop = asyncio.get_event_loop()
    await asyncio.gather(*[loop.run_in_executor(None, better_logging) for _ in range(simultaneous_calls)])
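
Not part of the diff: a minimal sketch of how the new module might be run locally, assuming the dev dependencies above are installed. The two load tests stay skipped unless their @pytest.mark.skip markers are removed.

# Hypothetical local runner, equivalent to invoking `pytest -v tests/test_requests.py`.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "tests/test_requests.py"]))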
