adding lllama fairscale #3433

Workflow file for this run

name: CI GPU

on:
  workflow_dispatch:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
  merge_group:
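
# Cancel any in-progress run for the same ref; pushes to master are keyed on
# run_number instead, so master builds are never cancelled by a newer push.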
concurrency:
  group: ci-gpu-${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
  cancel-in-progress: true

jobs:
  ci-gpu:
    runs-on: [self-hosted, ci-gpu]
    steps:
      - name: Clean up previous run
        run: |
          echo "Cleaning up previous run"
          ls -la ./
          # deletes all files in the current directory
          sudo rm -rf ./* || true
          # deletes all hidden files in a directory
          sudo rm -rf ./.??* || true
          ls -la ./
      - name: Setup Python 3.8
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
          architecture: x64
      - name: Setup Java 17
        uses: actions/setup-java@v3
        with:
          distribution: 'zulu'
          java-version: '17'
      - name: Checkout TorchServe
        uses: actions/checkout@v3
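      # install_dependencies.py installs the dev requirements; --cuda=cu121
      # selects the CUDA 12.1 builds for the GPU runner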
      - name: Install dependencies
        run: |
          python ts_scripts/install_dependencies.py --environment=dev --cuda=cu121
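      # Run the sanity suite through nick-fields/retry: up to 3 attempts,
      # 60 minutes each, retrying only when the command errors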
      - name: Torchserve Sanity
        uses: nick-fields/retry@v2
        with:
          timeout_minutes: 60
          retry_on: error
          max_attempts: 3
          command: |
            python torchserve_sanity.py
      # Any coverage.xml will be picked up by this step
      # Just make sure each coverage.xml is in a different folder
      - name: Upload codecov
        run: |
          curl -Os https://uploader.codecov.io/latest/linux/codecov
          chmod +x codecov
          ./codecov