# cifar_benchmark.yaml
name: CIFAR-10 benchmark CML
on:
  workflow_dispatch:
    inputs:
      git-ref:
        description: Repo reference (branch, tag or SHA)
        default: "main"
        required: true
        type: string
      benchmark:
        description: Benchmark to run (cifar-10-8b or cifar-10-16b)
        default: "cifar-10-16b"
        type: choice
        options:
          - "cifar-10-8b"
          - "cifar-10-16b"
      instance_type:
        description: Instance type on which to launch benchmarks
        default: "hpc7a.96xlarge"
        type: choice
        options:
          - "m6i.metal"
          - "u-6tb1.112xlarge"
          - "hpc7a.96xlarge"
      num_samples:
        description: Number of samples to use
        default: "3"
        type: string
        required: true
      p_error:
        description: P-error to use
        default: "0.01"
        type: string
        required: true
# FIXME: Add recurrent launching
# https://github.com/zama-ai/concrete-ml-internal/issues/1851
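#
# For reference, a minimal sketch of a manual dispatch with the GitHub CLI (assuming `gh`
# is installed and authenticated, and this workflow file is available on the chosen ref):
#
#   gh workflow run cifar_benchmark.yaml \
#     --ref main \
#     -f git-ref=main \
#     -f benchmark=cifar-10-16b \
#     -f instance_type=hpc7a.96xlarge \
#     -f num_samples=3 \
#     -f p_error=0.01
#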
# Global environment variables
env:
  # GitHub Actions run URL (used by the Slack notification)
  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
  AGENT_TOOLSDIRECTORY: /opt/hostedtoolcache
  RUNNER_TOOL_CACHE: /opt/hostedtoolcache
  SLAB_PROFILE: big-cpu
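# AGENT_TOOLSDIRECTORY and RUNNER_TOOL_CACHE (above) point the runner's tool cache at
# /opt/hostedtoolcache, presumably so that setup-python caches interpreters there on the
# self-hosted EC2 runner. SLAB_PROFILE is passed to the Slab runner action below to select
# the machine profile to start.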
# Jobs
jobs:
  setup-ec2:
    name: Setup EC2 instance
    runs-on: ubuntu-latest
    outputs:
      runner-name: ${{ steps.start-instance.outputs.label }}
      instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
    steps:
      - name: Start instance
        id: start-instance
        uses: zama-ai/slab-github-runner@867256e1784c920d89ce3411ffa031420c7ced11
        with:
          mode: start
          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          profile: ${{ env.SLAB_PROFILE }}
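
  # The job above starts an on-demand EC2 runner through the Slab runner action and exposes
  # its label as `runner-name`, which the benchmark job below targets via `runs-on`. The
  # matching `teardown-ec2` job at the end of this file stops the instance again.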
  run-cifar-10:
    needs: [setup-ec2]
    name: Run benchmark
    runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
    env:
      PIP_INDEX_URL: ${{ secrets.PIP_INDEX_URL }}
      PIP_EXTRA_INDEX_URL: ${{ secrets.PIP_EXTRA_INDEX_URL }}
    steps:
      - name: Add masks
        run: |
          echo "::add-mask::${{ secrets.INTERNAL_PYPI_URL_FOR_MASK }}"
          echo "::add-mask::${{ secrets.INTERNAL_REPO_URL_FOR_MASK }}"
          echo "::add-mask::${{ secrets.INTERNAL_PYPI_URL }}"
          echo "::add-mask::${{ secrets.INTERNAL_REPO_URL }}"
      - name: Checkout code
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
        with:
          lfs: true
          ref: ${{ github.event.inputs.git-ref }}
      - name: Set up Python
        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
        with:
          python-version: "3.8"
      - name: Install dependencies
        id: install-deps
        run: |
          apt update
          apt install --no-install-recommends -y gnome-keyring
          apt install -y graphviz* graphviz-dev libgraphviz-dev pkg-config python3-dev
          apt-mark hold docker.io
          ./script/make_utils/setup_os_deps.sh
          make setup_env
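      # Assumption: `make setup_env` creates the .venv virtual environment activated by the
      # benchmark steps below. Exactly one of the two benchmark steps runs, selected by the
      # `benchmark` workflow input.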
      # CIFAR-10-8b benchmark
      - name: Benchmark - CIFAR-10-8b
        if: github.event.inputs.benchmark == 'cifar-10-8b'
        run: |
          source .venv/bin/activate
          NUM_SAMPLES=${{ github.event.inputs.num_samples }} python3 ./use_case_examples/cifar/cifar_brevitas_with_model_splitting/infer_fhe.py
          python3 ./benchmarks/convert_cifar.py --model-name "8-bit-split-v0"
      # CIFAR-10-16b benchmark
      - name: Benchmark - CIFAR-10-16b
        if: github.event.inputs.benchmark == 'cifar-10-16b'
        run: |
          source .venv/bin/activate
          NUM_SAMPLES=${{ github.event.inputs.num_samples }} P_ERROR=${{ github.event.inputs.p_error }} python3 ./use_case_examples/cifar/cifar_brevitas_training/evaluate_one_example_fhe.py
          python3 ./benchmarks/convert_cifar.py --model-name "16-bits-trained-v0"
      - name: Archive raw predictions
        uses: actions/upload-artifact@v4.3.0
        with:
          name: predictions.csv
          path: inference_results.csv
      - name: Archive metrics
        uses: actions/upload-artifact@v4.3.0
        with:
          name: metrics.json
          path: to_upload.json
      - name: Archive MLIR
        uses: actions/upload-artifact@v4.3.0
        with:
          name: mlir.txt
          path: cifar10.mlir
      - name: Archive Graph
        uses: actions/upload-artifact@v4.3.0
        with:
          name: graph.txt
          path: cifar10.graph
      - name: Archive client
        uses: actions/upload-artifact@v4.3.0
        with:
          name: client.zip
          path: client_server/client.zip
      - name: Archive server
        uses: actions/upload-artifact@v4.3.0
        with:
          name: server.zip
          path: client_server/server.zip
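      # Note: upload-artifact v4 requires artifact names to be unique within a workflow run
      # and makes artifacts immutable once uploaded; the distinct per-file names above satisfy
      # that constraint.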
      # Keep this as the last step so that the artifacts above are still uploaded
      # even if this step fails
      - name: Upload results
        id: upload-results
        run: |
          # Log the JSON
          cat to_upload.json | jq
          # We need to sleep to avoid log issues
          sleep 1.
          # Upload the JSON to the benchmark database
          curl --fail-with-body \
            -H "Authorization: Bearer ${{ secrets.NEW_ML_PROGRESS_TRACKER_TOKEN }}" \
            -H "Content-Type: application/json; charset=UTF-8" \
            -d @to_upload.json \
            -X POST "${{ secrets.NEW_ML_PROGRESS_TRACKER_URL }}experiment"
  teardown-ec2:
    name: Teardown EC2 instance (fast-tests)
    if: ${{ always() }}
    needs: [ setup-ec2, run-cifar-10 ]
    runs-on: ubuntu-latest
    steps:
      - name: Stop instance
        id: stop-instance
        uses: zama-ai/slab-github-runner@ab65ad70bb9f9e9251e4915ea5612bcad23cd9b1
        with:
          mode: stop
          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          profile: ${{ env.SLAB_PROFILE }}
          label: ${{ needs.setup-ec2.outputs.runner-name }}
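
  # `if: always()` ensures the instance is stopped even when the benchmark job fails or is
  # cancelled, so the EC2 machine is not left running.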
  slack-notification:
    runs-on: ubuntu-20.04
    needs: [run-cifar-10]
    steps:
      - name: Slack Notification
        if: ${{ always() }}
        continue-on-error: true
        uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
        env:
          SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
          SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
          SLACK_COLOR: ${{ needs.run-cifar-10.result }}
          SLACK_MESSAGE: "Benchmark action: ${{ github.event.inputs.benchmark }} (${{ env.ACTION_RUN_URL }}) ended with result: ${{ needs.run-cifar-10.result }}"
          SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
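
  # SLACK_COLOR is fed the result of the benchmark job; the rtCamp action is expected to map
  # results such as "success" and "failure" to green and red message colors, so the notification
  # reflects the benchmark outcome at a glance.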