Merge pull request #64 from Percona-Lab/fix_integration_run
PMM-7 Make minor adjustments for fetching run results
puneet0191 authored Jun 24, 2024
2 parents 22380a6 + fee1e03 commit 1b47938
Showing 2 changed files with 24 additions and 30 deletions.
9 changes: 1 addition & 8 deletions .github/workflows/RESULTS.yaml
@@ -1,20 +1,13 @@
 name: qa-integration-results
 on:
   workflow_dispatch:
-    inputs:
-      job_name:
-        description: "Name of the Job for generating summary"
-        required: true
-        type: string
-
 jobs:
   qa-integration-results:
     name: "Generate Integration Job execution results"
     runs-on: ubuntu-20.04
     timeout-minutes: 10
     env:
       PK_GITHUB_TOKEN: ${{ secrets.PK_GITHUB_TOKEN }}
-      JOB_NAME: ${{ inputs.job_name || 'PMM_PS' }}
 
     steps:
       - uses: actions/checkout@v2
@@ -29,4 +22,4 @@ jobs:
         with:
           name: Attach Results output
           path: |
-            results/workflow_runs_${{ env.JOB_NAME }}.csv
+            results/*.csv
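
The `- uses:` line of the upload step above is collapsed in this view, so only its `with:` block appears in the diff. A minimal sketch of what the full step plausibly looks like, assuming it relies on actions/upload-artifact (the action and its version are assumptions; only the `name:` and `path:` values are confirmed by the diff). The glob is needed because the script now writes one CSV per job rather than a single JOB_NAME-templated file:

# Hypothetical reconstruction; only the `name:` and `path:` values come from the diff.
- uses: actions/upload-artifact@v2
  with:
    name: Attach Results output
    path: |
      results/*.csv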
45 changes: 23 additions & 22 deletions qa_integration_results.py
@@ -23,37 +23,38 @@ def get_workflow_runs(owner, repo, job_name, token):
     url = f"https://api.github.com/repos/{owner}/{repo}/actions/runs"
     params = {
         "event": "workflow_dispatch",
-        "workflow_id": job_name
+        "head_branch": "main"
     }
     response = requests.get(url, headers=headers, params=params)
     if response.status_code == 200:
         return response.json()["workflow_runs"]
     else:
-        print("Failed to fetch workflow runs")
+        print(f"Failed to fetch workflow runs for job {job_name}")
         return None
 
 # Example usage:
 owner = "Percona-Lab"
 repo = "qa-integration"
-job_name = os.environ.get("JOB_NAME")  # Get job_name from environment variable
+job_names = ["PMM_PDPGSQL", "PMM_PS", "PMM_PSMDB_PBM", "PMM_PXC", "PMM_PXC_PROXYSQL"]
 token = os.environ.get("PK_GITHUB_TOKEN")  # Get token from environment variable
 
-workflow_runs = get_workflow_runs(owner, repo, job_name, token)
-if workflow_runs:
-    os.makedirs("results", exist_ok=True)
-    csv_filename = f'results/workflow_runs_{job_name}.csv'
-    with open(csv_filename, 'w', newline='') as csvfile:
-        fieldnames = ['Run ID', 'Triggered By', 'Created At', 'Status']
-        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
-        writer.writeheader()
-        for run in workflow_runs:
-            triggered_by = run["triggering_actor"]["login"]
-            user_name = get_user_details(triggered_by, token)
-            writer.writerow({
-                'Run ID': run["id"],
-                'Triggered By': user_name if user_name else triggered_by,
-                'Created At': run["created_at"],
-                'Status': run["conclusion"]
-            })
-
-    print(f"Data exported to {csv_filename} file successfully.")
+for job_name in job_names:
+    workflow_runs = get_workflow_runs(owner, repo, job_name, token)
+    if workflow_runs:
+        os.makedirs("results", exist_ok=True)
+        csv_filename = f'results/workflow_runs_{job_name}.csv'
+        with open(csv_filename, 'w', newline='') as csvfile:
+            fieldnames = ['Run ID', 'Triggered By', 'Created At', 'Conclusion']
+            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+            writer.writeheader()
+            for run in workflow_runs:
+                if run['name'] == job_name:
+                    triggered_by = run["triggering_actor"]["login"]
+                    user_name = get_user_details(triggered_by, token)
+                    writer.writerow({
+                        'Run ID': run["id"],
+                        'Triggered By': user_name if user_name else triggered_by,
+                        'Created At': run["created_at"],
+                        'Conclusion': run["conclusion"]
+                    })
+        print(f"Data exported to {csv_filename} file successfully.")
