name: Benchmark Tests
on:
  # schedule:
  #   - cron: '12 1 * * 0'
  pull_request_review:
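# Both jobs run on the (currently disabled) weekly schedule, or when a pull
# request review is approved or asks to "please run benchmark". The `test` job
# benchmarks an old and a new reference and publishes a comparison report.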
jobs:
  test:
    name: Execute benchmark tests
    if: github.event_name == 'schedule' || (github.event_name == 'pull_request_review' && (github.event.review.state == 'approved' || contains(github.event.review.body, 'please run benchmark')))
    runs-on: ubuntu-22.04
    env:
      BENCHMARK_NUMBER_SAMPLES: 100
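    # Number of samples per benchmark test for the real runs; the
    # snapshot-update run below overrides this to 1, as its measurements are
    # only used to refresh the snapshots.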
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Need to fetch enough commits to reach the common ancestor - but don't want to fetch everything
          fetch-depth: 100
      - name: Get hashes for schedule event
        if: ${{ github.event_name == 'schedule' }}
        run: |
          echo "OLD_REF_SHA=$(git rev-parse 'main@{7 days ago}')" >> $GITHUB_ENV
          echo "NEW_REF_SHA=$(git rev-parse 'main')" >> $GITHUB_ENV
      - name: Get hashes for PR review event
        if: ${{ github.event_name == 'pull_request_review' }}
        uses: actions/github-script@v6
        with:
          script: |
            const child_process = require("child_process");
            const pull_request = context.payload.pull_request;
            child_process.exec(`git merge-base ${pull_request.head.sha} ${pull_request.base.sha}`, (error, stdout, stderr) => {
              if (error) {
                console.log(error);
                process.exit(1);
                return;
              }
              if (stderr) {
                console.log(stderr);
                process.exit(1);
                return;
              }
              core.exportVariable('OLD_REF_SHA', stdout.trim());
              core.exportVariable('NEW_REF_SHA', pull_request.head.sha);
              core.exportVariable('PULL_REQUEST_ID', pull_request.number);
            });
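      # The script above is roughly equivalent to this shell sketch (assuming
      # both the PR head and base commits exist in the fetched history):
      #   OLD_REF_SHA=$(git merge-base "$HEAD_SHA" "$BASE_SHA")
      #   NEW_REF_SHA=$HEAD_SHA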
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - uses: iterative/setup-cml@v2
      # First, run the benchmark on the old reference
      - name: Checkout old reference
        run: |
          echo Checking out ${OLD_REF_SHA}...
          git checkout ${OLD_REF_SHA}
      - name: Install dependencies
        run: |
          bash ./scripts/ci_install.sh
          # Build dev-mode
          jlpm run build
      - name: Launch JupyterLab
        shell: bash
        working-directory: galata
        run: |
          # Start the server in the background, capturing its logs
          (jlpm start > /tmp/jupyterlab_server_old.log 2>&1) &
      - name: Install browser
        working-directory: galata
        run: |
          # Install only Chromium browser
          jlpm playwright install chromium
          jlpm run build
      - name: Wait for JupyterLab
        run: npx wait-on@7.2.0 http-get://localhost:8888/lab -t 360000
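      # wait-on polls http://localhost:8888/lab with GET requests until the
      # server responds; `-t 360000` is the timeout in milliseconds (6 minutes).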
      - name: Execute benchmark tests
        continue-on-error: true
        working-directory: galata
        run: |
          jlpm run test:benchmark -u
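      # `-u` is Playwright's --update-snapshots flag: this run regenerates the
      # reference data (lab-benchmark-expected.json) from the old version.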
      - name: Kill the server
        shell: bash
        run: |
          kill -s SIGKILL $(pgrep jupyter-lab)
      # Then run the benchmark on the new reference
      - name: Checkout latest version
        run: |
          cp galata/lab-benchmark-expected.json /tmp/
          git restore galata/lab-benchmark-expected.json || true # Not versioned any more
          echo Checking out ${NEW_REF_SHA}...
          git checkout ${NEW_REF_SHA}
      - name: Install dependencies
        run: |
          # Reset installation
          jlpm run clean:slate
          jlpm run build
      - name: Launch JupyterLab
        working-directory: galata
        run: |
          # Start the server in the background, capturing its logs
          (jlpm start > /tmp/jupyterlab_server_new.log 2>&1) &
      - name: Install browser
        working-directory: galata
        run: |
          # Install only Chromium browser
          jlpm playwright install chromium
          jlpm run build
      - name: Wait for JupyterLab
        run: npx wait-on@7.2.0 http-get://localhost:8888/lab -t 360000
      - name: Execute benchmark tests
        continue-on-error: true
        shell: bash
        working-directory: galata
        run: |
          set -ex
          # Update the test snapshots first, in case they have not yet been
          # corrected on the challenger branch
          BENCHMARK_NUMBER_SAMPLES=1 PW_VIDEO=1 jlpm run test:benchmark -u
          # Restore the reference data generated on the old version; otherwise
          # the values just written by the snapshot update above would be used
          cp /tmp/lab-benchmark-expected.json .
          jlpm run test:benchmark
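      # The comparison run writes its report, chart, and raw data into
      # galata/benchmark-results/, which the report step below consumes.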
      - name: Stop JupyterLab
        if: always()
        run: |
          kill -s SIGTERM $(pgrep jupyter-lab)
      - name: Generate the report
        env:
          REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          REPORT: ./benchmark-results/lab-benchmark.md
        shell: bash
        working-directory: galata
        run: |
          # Publish image to cml.dev
          echo "" >> ${REPORT}
          cml-publish ./benchmark-results/lab-benchmark.svg --md >> ${REPORT}
          echo "" >> ${REPORT}
          # Check whether the test metadata has changed
          export METADATA_DIFF="/tmp/metadata.diff"
          diff -u <(jq --sort-keys .metadata benchmark-results/lab-benchmark.json) <(jq --sort-keys .metadata lab-benchmark-expected.json) > ${METADATA_DIFF} || true
          if [[ -s ${METADATA_DIFF} ]]; then
            echo "<details><summary>:exclamation: Test metadata has changed</summary>" >> ${REPORT}
            echo "" >> ${REPORT}
            echo "\`\`\`diff" >> ${REPORT}
            cat ${METADATA_DIFF} >> ${REPORT}
            echo "\`\`\`" >> ${REPORT}
            echo "" >> ${REPORT}
            echo "</details>" >> ${REPORT}
          fi
          # Set the report as summary
          cat ${REPORT} >> ${GITHUB_STEP_SUMMARY}
          # Copy the reference data to upload it as artifact
          cp lab-benchmark-expected.json ./benchmark-results/
          # Save PR number for comment publication
          echo "${PULL_REQUEST_ID}" > ./benchmark-results/NR
      - name: Upload Galata Test assets
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: benchmark-assets
          path: |
            galata/benchmark-results
            galata/test-results
      - name: Print JupyterLab logs
        if: always()
        run: |
          cat /tmp/jupyterlab_server_old.log
          cat /tmp/jupyterlab_server_new.log
  memory-leak:
    name: Execute memory-leak tests
    if: github.event_name == 'schedule' || (github.event_name == 'pull_request_review' && (github.event.review.state == 'approved' || contains(github.event.review.body, 'please run benchmark')))
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Get PR number for review event
        if: ${{ github.event_name == 'pull_request_review' }}
        uses: actions/github-script@v6
        with:
          script: |
            const pull_request = context.payload.pull_request;
            core.exportVariable('PULL_REQUEST_ID', pull_request.number);
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Install dependencies
        run: |
          bash ./scripts/ci_install.sh
          # Build dev-mode
          jlpm run build
      - name: Launch JupyterLab
        shell: bash
        working-directory: galata
        run: |
          # Start the server in the background, capturing its logs
          jlpm start > /tmp/jupyterlab_server.log 2>&1 &
      - name: Execute memory leak tests
        id: memory_leaks
        uses: jupyterlab/benchmarks/.github/actions/memory-leak@v1
        with:
          server_url: localhost:8888
          artifacts_name: benchmark-assets
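      # The memory-leak action runs against the server started above; it is
      # assumed to publish its own results under the `benchmark-assets`
      # artifact name passed via `artifacts_name`.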
      - name: Kill the server
        if: always()
        shell: bash
        run: |
          kill -s SIGKILL $(pgrep jupyter-lab)
      - name: Print JupyterLab logs
        if: always()
        shell: bash
        run: |
          cat /tmp/jupyterlab_server.log