From 3c399b283dfa5a77d3b0ee15f9991917eed58995 Mon Sep 17 00:00:00 2001
From: Rohit
Date: Fri, 20 Sep 2024 14:00:07 -0400
Subject: [PATCH] Update

---
 src/integrations/codecov/ __init__.py        |   0
 src/integrations/codecov/codecov_client.py   |  13 ++-
 src/seer/app.py                              |   2 +-
 src/seer/automation/codebase/repo_client.py  |  11 ++
 src/seer/automation/codegen/prompts.py       | 104 +++++--------------
 .../codegen/unit_test_coding_component.py    |  11 +-
 src/seer/automation/codegen/unittest_step.py |   9 +-
 7 files changed, 58 insertions(+), 92 deletions(-)
 create mode 100644 src/integrations/codecov/ __init__.py

diff --git a/src/integrations/codecov/ __init__.py b/src/integrations/codecov/ __init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/integrations/codecov/codecov_client.py b/src/integrations/codecov/codecov_client.py
index d03b373cc..3eae09175 100644
--- a/src/integrations/codecov/codecov_client.py
+++ b/src/integrations/codecov/codecov_client.py
@@ -12,23 +12,24 @@ def fetch_coverage(owner_username, repo_name, pullid, token=CODECOV_TOKEN):
         }
         response = requests.get(url, headers=headers)
         if response.status_code == 200:
-            return response.json()
+            return response.text
         else:
-            response.raise_for_status()
+            return None
 
     @staticmethod
     def fetch_test_results_for_commit(owner_username, repo_name, latest_commit_sha, token=CODECOV_TOKEN):
-        url = f"https://api.codecov.io/api/v2/github/{owner_username}/repos/{repo_name}/test-results"
+        url = f"https://api.codecov.io/api/v2/github/{owner_username}/repos/{repo_name}/test-results?commit_id={latest_commit_sha}&outcome=failure"
         headers = {
             "Authorization": f"Bearer {token}",
             "Accept": "application/json",
-            latest_commit_sha: latest_commit_sha,
         }
         response = requests.get(url, headers=headers)
         if response.status_code == 200:
-            return response.json()
+            if response.json()["count"] == 0:
+                return None
+            return response.text
         else:
-            response.raise_for_status()
+            return None
 
     @staticmethod
diff --git a/src/seer/app.py b/src/seer/app.py
index 3a8c6957b..151ef0e33 100644
--- a/src/seer/app.py
+++ b/src/seer/app.py
@@ -2,7 +2,7 @@
 import time
 
 import flask
-from integrations.codecov.codecov_client import CodecovClient
+from integrations.codecov import CodecovClient
 import sentry_sdk
 from flask import Blueprint, Flask, jsonify
 from sentry_sdk.integrations.flask import FlaskIntegration
diff --git a/src/seer/automation/codebase/repo_client.py b/src/seer/automation/codebase/repo_client.py
index ae8c59b09..98bf8c16b 100644
--- a/src/seer/automation/codebase/repo_client.py
+++ b/src/seer/automation/codebase/repo_client.py
@@ -449,3 +449,14 @@ def get_pr_diff_content(self, pr_url: str) -> str:
         data.raise_for_status()  # Raise an exception for HTTP errors
 
         return data.text
+
+    def get_pr_head_sha(self, pr_url: str) -> str:
+        requester = self.repo._requester
+        headers = {
+            "Authorization": f"{requester.auth.token_type} {requester.auth.token}",  # type: ignore
+            "Accept": "application/vnd.github.raw+json",
+        }
+
+        data = requests.get(pr_url, headers=headers)
+        data.raise_for_status()  # Raise an exception for HTTP errors
+        return data.json()['head']['sha']
diff --git a/src/seer/automation/codegen/prompts.py b/src/seer/automation/codegen/prompts.py
index bf78d7285..868f94c12 100644
--- a/src/seer/automation/codegen/prompts.py
+++ b/src/seer/automation/codegen/prompts.py
@@ -18,8 +18,12 @@ def format_system_msg():
         )
 
     @staticmethod
-    def format_plan_step_msg(diff_str: str, has_coverage_info: bool | str = False, has_test_result_info: bool | str = False):
-        return textwrap.dedent(
+    def format_plan_step_msg(
+        diff_str: str,
+        has_coverage_info: str | None = None,
+        has_test_result_info: str | None = None,
+    ):
+        base_msg = textwrap.dedent(
             """\
             You are given the below code changes as a diff:
             {diff_str}
@@ -34,89 +38,37 @@ def format_plan_step_msg(diff_str: str, has_coverage_info: bool | str = False, h
             # Guidelines:
             - No placeholders are allowed, the unit test must be clear and detailed.
             - Make sure you use the tools provided to look through the codebase and at the files you are changing before outputting your suggested fix.
-            - The unit tests must be comprehensive. Do not provide temporary examples, placeholders or incomplete ones.
+            - The unit tests must be comprehensive. Do not provide temporary examples, placeholders, or incomplete ones.
             - In your suggested unit tests, whenever you are providing code, provide explicit diffs to show the exact changes that need to be made.
             - All your changes should be in test files.
             - EVERY TIME before you use a tool, think step-by-step each time before using the tools provided to you.
             - You also MUST think step-by-step before giving the final answer."""
-        ).format(
-            diff_str=diff_str,
-        )
-
-    @staticmethod
-    def format_additional_code_coverage_info(coverage_info_str: str):
-        return textwrap.dedent(
-            """\
-            You are given the following code coverage information for the current diff as a json object:
-            {coverage_info_str}
-
-            # Additional Instructions for Using Code Coverage Information:
-
-            1. Analyze the provided code coverage data carefully. Pay attention to:
-            - Lines that are marked as 'miss' or 'partial'
-            - The overall coverage percentage for each file
-            - Any significant differences between 'base_totals' and 'head_totals'
-
-            2. Prioritize creating tests for:
-            - Uncovered lines (marked as 'miss')
-            - Partially covered lines (marked as 'partial')
-            - Files with lower overall coverage percentages
-            - New or modified code that has resulted in coverage decreases
-
-            3. For each file in the diff:
-            - Compare the 'base_totals' and 'head_totals' to identify changes in coverage
-            - Focus on increasing both line and branch coverage
-            - Pay special attention to changes in complexity and ensure they are adequately tested
-
-            4. When designing tests:
-            - Aim to increase the 'hits' count while decreasing 'misses' and 'partials'
-            - Consider edge cases and boundary conditions, especially for partially covered lines
-            - Ensure that any increase in 'complexity_total' is matched with appropriate test coverage
-
-            Remember, the goal is not just to increase the coverage numbers, but to ensure that the new tests meaningfully verify the behavior of the code, especially focusing on the changes and additions in the current diff.
-
-            Integrate this information with the diff analysis to provide a comprehensive and targeted testing strategy.
-            """
-        ).format(
-            coverage_info_str=coverage_info_str,
-        )
-
-    @staticmethod
-    def format_additional_test_results_info(test_result_data: str):
-        return textwrap.dedent(
-            """\
-            You are provided with the following test result data for existing tests for the diff:
-            {test_result_data}
-
-            # Instructions for Analyzing Test Result Data:
+        ).format(diff_str=diff_str)
 
-            1. Review the test result data for each existing test, focusing on:
-            - Failure messages
-            - Test duration
-            - Failure rates
-
-            2. For tests with failures:
-            - Analyze the failure messages to identify issues introduced or exposed by this commit
-            - Consider creating additional tests to isolate and address these failures
-            - Suggest improvements or refactoring for the existing tests if the failures seem to be related to the changes in this commit
+        if has_coverage_info:
+            coverage_info_msg = textwrap.dedent(
+                """\
+                You are also given the following code coverage information for the current diff as a JSON object:
+                {coverage_info_str}
 
-            3. For tests with unusually long durations:
-            - Evaluate if the commit has introduced performance issues
-            - Consider adding performance-related tests if long durations are unexpected and related to the changes
+                Remember, the goal is not just to improve coverage numbers but to verify the behavior of the code meaningfully, focusing on the recent changes.
+                Integrate this information with your diff analysis to provide a comprehensive and targeted testing strategy.
+                """
+            ).format(coverage_info_str=has_coverage_info)
+            base_msg += "\n\n" + coverage_info_msg
 
-            4. Use the 'name' field of each test to understand its purpose and coverage area:
-            - Identify gaps in test coverage based on the names and purposes of existing tests, especially in relation to the changes in this commit
-            - Suggest new tests that complement the existing test suite and specifically address the changes in this commit
+        if has_test_result_info:
+            test_result_info_msg = textwrap.dedent(
+                """\
+                You are provided with the following test result data for existing tests related to the diff:
+                {test_result_data}
 
-            5. Pay special attention to the 'failure_rate' for each test:
-            - For tests with high failure rates, investigate if the failures are directly related to the changes in this commit
-            - For tests that previously passed but now fail, focus on understanding what in the commit might have caused this change
+                Use this information to enhance your test creation strategy, ensuring new tests reinforce areas of failure and improve overall test suite effectiveness in the context of the introduced changes.
+                """
+            ).format(test_result_data=has_test_result_info)
+            base_msg += "\n\n" + test_result_info_msg
 
-            Use this information to enhance your test creation strategy, ensuring that new tests reinforce areas of failure, and improve overall test suite effectiveness in the context of the changes introduced.
-            """
-        ).format(
-            test_result_data=test_result_data,
-        )
+        return base_msg
 
     @staticmethod
     def format_find_unit_test_pattern_step_msg(diff_str: str):
diff --git a/src/seer/automation/codegen/unit_test_coding_component.py b/src/seer/automation/codegen/unit_test_coding_component.py
index 268f4af72..2b95b35d5 100644
--- a/src/seer/automation/codegen/unit_test_coding_component.py
+++ b/src/seer/automation/codegen/unit_test_coding_component.py
@@ -17,7 +17,7 @@
 from seer.automation.component import BaseComponent
 from seer.automation.models import FileChange
 from seer.automation.utils import escape_multi_xml, extract_text_inside_tags
-from integrations.codecov.codecov_client import CodecovClient
+from integrations.codecov import CodecovClient
 
 logger = logging.getLogger(__name__)
 
@@ -59,12 +59,10 @@ def invoke(self, request: CodeUnitTestRequest, codecov_client_params: dict | Non
 
             test_result_data = CodecovClient.fetch_test_results_for_commit(
                 repo_name=codecov_client_params["repo_name"],
-                pullid=codecov_client_params["pullid"],
                 owner_username=codecov_client_params["owner_username"],
-                latest_commit_sha="SHA GOES HERE"
+                latest_commit_sha=codecov_client_params["head_sha"]
             )
 
-        print(code_coverage_data, test_result_data)
         # Pass this into format_plan_step_msg if they exist. Then combine the prompts
 
         existing_test_design_response = self._get_test_design_summary(
@@ -76,7 +74,9 @@ def invoke(self, request: CodeUnitTestRequest, codecov_client_params: dict | Non
         self._get_plan(
             agent=agent,
             prompt=CodingUnitTestPrompts.format_plan_step_msg(
-                diff_str=request.diff
+                diff_str=request.diff,
+                has_coverage_info=code_coverage_data,
+                has_test_result_info=test_result_data
             )
         )
 
@@ -89,7 +89,6 @@ def invoke(self, request: CodeUnitTestRequest, codecov_client_params: dict | Non
 
         if not final_response:
             return None
-
         plan_steps_content = extract_text_inside_tags(final_response, "plan_steps")
 
         if len(plan_steps_content) == 0:
diff --git a/src/seer/automation/codegen/unittest_step.py b/src/seer/automation/codegen/unittest_step.py
index 4cf841224..5467934e7 100644
--- a/src/seer/automation/codegen/unittest_step.py
+++ b/src/seer/automation/codegen/unittest_step.py
@@ -53,14 +53,17 @@ def _invoke(self, **kwargs):
         repo_client = self.context.get_repo_client()
         pr = repo_client.repo.get_pull(self.request.pr_id)
 
+        diff_content = repo_client.get_pr_diff_content(pr.url)
+
+        latest_commit_sha = repo_client.get_pr_head_sha(pr.url)
+
         codecov_client_params = {
            "repo_name": self.request.repo_definition.name,
            "pullid": self.request.pr_id,
-           "owner_username": self.request.repo_definition.owner
+           "owner_username": self.request.repo_definition.owner,
+           "head_sha": latest_commit_sha
        }
 
-        diff_content = repo_client.get_pr_diff_content(pr.url)
-
        unittest_output = UnitTestCodingComponent(self.context).invoke(
            CodeUnitTestRequest(
                diff=diff_content,