Use CM cache for mlperf inference submission generation #468
# This workflow will add/update the default dockerfile for any updated CM scripts
name: Dockerfile update for CM scripts

on:
  push:
    branches: [ "master", "dev" ]
    paths:
      - 'cm-mlops/script/**_cm.json'

jobs:
  dockerfile:
    if: github.repository == 'mlcommons/ck'
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout'
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Get changed files
        id: getfile
        run: |
          echo "files=$(git diff --name-only ${{ github.event.before }} | xargs)" >> $GITHUB_OUTPUT
      - name: Update dockerfile
        run: |
          for file in ${{ steps.getfile.outputs.files }}; do
            echo $file
          done
          python3 -m pip install cmind
          cm pull repo --url=https://github.com/${{ github.repository }} --checkout=${{ github.ref_name }}
          python3 tests/script/process_dockerfile.py ${{ steps.getfile.outputs.files }}
          FOLDER=`cm find repo mlcommons@ck | cut -d' ' -f3`
          USER=cTuning
          EMAIL=admin@ctuning.org
          git config --global user.name "$USER"
          git config --global user.email "$EMAIL"
          git remote set-url origin https://x-access-token:${{ secrets.ACCESS_TOKEN }}@github.com/${{ github.repository }}
          git add *.Dockerfile
          git diff-index --quiet HEAD || (git commit -am "Updated dockerfile" && git push)
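
For local debugging, the Dockerfile-regeneration step can be reproduced outside CI with the same commands the workflow runs. This is a minimal sketch, assuming a checkout of mlcommons/ck on the dev branch; the changed-file path passed to process_dockerfile.py is only an illustrative placeholder, not a file the workflow guarantees to exist.

  # Sketch: reproduce the "Update dockerfile" step locally (run from the repo root).
  python3 -m pip install cmind
  cm pull repo --url=https://github.com/mlcommons/ck --checkout=dev
  # Hypothetical example of a changed CM script metadata file:
  python3 tests/script/process_dockerfile.py cm-mlops/script/app-mlperf-inference/_cm.json
  # Inspect any Dockerfiles the script regenerated before committing:
  git status --short -- '*.Dockerfile'

In CI the file list comes from the "Get changed files" step (a git diff against github.event.before), and the commit/push only happens when git diff-index detects that a Dockerfile actually changed.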