add telemetry for project import; small telemetry fixes #1005

Merged · 1 commit · Jun 10, 2024

core/agents/importer.py (+11 −0)

@@ -6,6 +6,7 @@
 from core.db.models import Complexity
 from core.llm.parser import JSONParser
 from core.log import get_logger
+from core.telemetry import telemetry
 from core.templates.example_project import EXAMPLE_PROJECT_DESCRIPTION

 log = get_logger(__name__)

@@ -84,3 +85,13 @@ async def analyze_project(self):
                 "complexity": Complexity.HARD if len(self.current_state.files) > 5 else Complexity.SIMPLE,
             }
         ]
+
+        n_lines = sum(len(f.content.content.splitlines()) for f in self.current_state.files)
+        await telemetry.trace_code_event(
+            "existing-project",
+            {
+                "num_files": len(self.current_state.files),
+                "num_lines": n_lines,
+                "description": llm_response,
+            },
+        )
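
For context on the new block: the doubled .content reflects the model nesting (a file row holds a content object whose content attribute is the raw text). A minimal, runnable sketch of the payload construction, using illustrative stand-in classes rather than the project's actual models:

from dataclasses import dataclass


@dataclass
class FileContent:
    content: str  # raw file text


@dataclass
class File:
    path: str
    content: FileContent  # note the nesting: file.content.content is the text


files = [
    File("app.py", FileContent("print('hi')\n")),
    File("util.py", FileContent("a = 1\nb = 2\n")),
]

n_lines = sum(len(f.content.content.splitlines()) for f in files)
payload = {"num_files": len(files), "num_lines": n_lines, "description": "..."}
print(payload)  # {'num_files': 2, 'num_lines': 3, 'description': '...'}
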
core/cli/main.py (+0 −3)

@@ -28,7 +28,6 @@ async def run_project(sm: StateManager, ui: UIBase) -> bool:
     :return: True if the orchestrator exited successfully, False otherwise.
     """

-    telemetry.start()
     telemetry.set("app_id", str(sm.project.id))
     telemetry.set("initial_prompt", sm.current_state.specification.description)

@@ -58,7 +57,6 @@
         source=pythagora_source,
     )

-    await telemetry.send()
     return success

@@ -147,7 +145,6 @@ async def run_pythagora_session(sm: StateManager, ui: UIBase, args: Namespace):

     if args.project or args.branch or args.step:
         telemetry.set("is_continuation", True)
-        # FIXME: we should send the project stage and other runtime info to the UI
         success = await load_project(sm, args.project, args.branch, args.step)
         if not success:
             return False
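
Removing telemetry.start() and await telemetry.send() from run_project implies the session-level caller now owns the telemetry lifecycle. A hedged sketch of that assumed shape (run_session is illustrative; the diff does not show where the calls moved):

from core.cli.main import run_project  # assumption: import path per this repo's layout
from core.telemetry import telemetry


async def run_session(sm, ui):
    telemetry.start()  # assumption: started once per session by the caller
    try:
        return await run_project(sm, ui)
    finally:
        await telemetry.send()  # assumption: flushed once, even if the run fails
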
core/llm/base.py (+4 −1)

@@ -181,6 +181,7 @@ async def __call__(
             remaining_retries -= 1
             request_log.messages = convo.messages[:]
             request_log.response = None
+            request_log.status = LLMRequestStatus.SUCCESS
             request_log.error = None
             response = None

@@ -278,7 +279,9 @@ async def __call__(
                     response = parser(response)
                     break
                 except ValueError as err:
-                    log.debug(f"Error parsing GPT response: {err}, asking LLM to retry", exc_info=True)
+                    request_log.error = f"Error parsing response: {err}"
+                    request_log.status = LLMRequestStatus.ERROR
+                    log.debug(f"Error parsing LLM response: {err}, asking LLM to retry", exc_info=True)
                     convo.assistant(response)
                     convo.user(f"Error parsing response: {err}. Please output your response EXACTLY as requested.")
                     continue
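
A minimal, self-contained sketch of the per-attempt bookkeeping this hunk introduces: each retry resets the request log optimistically to SUCCESS, and a parser failure downgrades it to ERROR before the LLM is asked to retry. LLMRequestStatus and RequestLog here are illustrative stand-ins; the project's real definitions are not shown in this diff.

from dataclasses import dataclass
from enum import Enum


class LLMRequestStatus(Enum):
    SUCCESS = "success"
    ERROR = "error"


@dataclass
class RequestLog:
    status: LLMRequestStatus = LLMRequestStatus.SUCCESS
    error: str | None = None


def parse_attempt(raw: str, parser, request_log: RequestLog):
    """One attempt: reset the log entry, parse, and record any failure."""
    request_log.status = LLMRequestStatus.SUCCESS  # optimistic reset per attempt
    request_log.error = None
    try:
        return parser(raw)
    except ValueError as err:
        request_log.error = f"Error parsing response: {err}"
        request_log.status = LLMRequestStatus.ERROR
        return None  # caller appends the error to the conversation and retries
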
core/telemetry/__init__.py (+1 −0)

@@ -321,6 +321,7 @@ async def send(self, event: str = "pilot-telemetry"):
         Note: this method clears all telemetry data after sending it.
         """
         if not self.enabled:
+            log.debug("Telemetry.send(): telemetry is disabled, not sending data")
             return

         if self.endpoint is None:

tests/llm/test_openai.py (+1 −1)

@@ -114,7 +114,7 @@ async def test_openai_parser_fails(mock_AsyncOpenAI):

     llm = OpenAIClient(cfg)

-    with pytest.raises(APIError, match="Error parsing LLM response"):
+    with pytest.raises(APIError, match="Error parsing response"):
         await llm(convo, parser=parser, max_retries=1)