name: CI/CD Pipeline
# Continuous Integration / Continuous Delivery
# Triggers on all branches and on tags starting with v
# The Full Job Matrix for Stress Testing is activated on 'master'/'main', 'stress-test' and tags
## We test across Platforms and Python versions
# For other branches only one Job is spawned to run the (Unit) Tests
# PyPI publish on v* tags on 'master' branch
# Test PyPI publish on v* 'pre-release' tags on 'release' branch
# Dockerhub publish on all branches and tags
on:
  push:
    branches:
      - "*"
    tags:
      - v*
# from gh docs: "A map of variables that are available to the steps of all jobs in the workflow."
env:
  ## Pipeline Constants ##
  # Job Matrix as an env var !
  FULL_MATRIX_STRATEGY: "{\"platform\": [\"ubuntu-latest\", \"macos-latest\", \"windows-latest\"], \"python-version\": [\"3.8\", \"3.9\", \"3.10\", \"3.11\"]}"
  UBUNTU_PY38_STRATEGY: "{\"platform\":[\"ubuntu-latest\"], \"python-version\":[\"3.8\"]}"
  TEST_STRATEGY: "{\"platform\":[\"ubuntu-latest\", \"macos-latest\", \"windows-latest\"], \"python-version\":[\"3.9\"]}"
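  # These JSON strings are passed through Job outputs and consumed with fromJSON,
  # e.g. (exactly as the test_suite Job below does):
  #   strategy:
  #     matrix: ${{ fromJSON(needs.set_github_outputs.outputs.matrix) }}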
  ##### Pipeline Settings ####
  # On/Off switches for the various CI/CD steps
  # If false, the step is prevented from running
  # If true, the step is allowed to run (it still may be skipped on other runtime conditions)
  RUN_UNIT_TESTS: "false"
  RUN_LINT_CHECKS: "false"
  PUBLISH_ON_PYPI: "false"
  DRAW_DEPENDENCIES: "false"
  PREVENT_CODECOV_TEST_COVERAGE: "false"  # Codecov Job runs only on 'false'
  # Hard switch to turn on/off the Docker build and publish to Dockerhub
  DOCKER_JOB_ON: "true"
  #### Behaviour - Docker build and Publish ####
  # Override the policy-dependent decision-making and
  # accept ALL (branch/build) events for Publishing to Dockerhub
  ALWAYS_BUILD_N_PUBLISH_DOCKER: "false"
  # Docker Policy
  ## Continuous Integration / Continuous Deployment
  ## CDeployment : Builds and Publishes only if Tests ran and passed
  # DOCKER_JOB_POLICY: "CDeployment"
  ## Continuous Integration / Continuous Delivery
  ## CDelivery : Builds and Publishes if Tests Passed or if Tests were Skipped
  DOCKER_JOB_POLICY: "CDelivery"
  ## Lower level config ##
  # 2 bit state machine
  # 0 0 pure CI/CD mode, aka Admit if Pass, Require Pass, guarantee quality
  # 0 1 Always build and publish, aka Admit All
  # 1 0 CI/CD with Bypass Opt, aka Admit Tested, and Admit when Test is OFF
  # 1 1 Never build and publish, aka No Admittance, guarantee NO Dockerhub publish
  # alternative 2 bit representations:
  # 1. Accept all for Docker 1/0 (1 bit), Mode (CICD, or CICD + allow if test OFF) (1 bit)
  # 2. No Docker Bypass 0/1 (1 bit), Accept all 0/1 (1 bit).
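  # Example mapping, as computed by the set_github_outputs Job below:
  #   DOCKER_JOB_ON=true  + ALWAYS_BUILD_N_PUBLISH_DOCKER=true -> DOCKER_POLICY=1
  #   DOCKER_JOB_ON=true  + DOCKER_JOB_POLICY=CDeployment      -> DOCKER_POLICY=0
  #   DOCKER_JOB_ON=true  + DOCKER_JOB_POLICY=CDelivery        -> DOCKER_POLICY=2
  #   DOCKER_JOB_ON=false + (any policy)                       -> DOCKER_POLICY=3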
  # Define the Policy that our Job uses to decide whether to run the Docker build and publish to Dockerhub
  # Pol 1: Run Docker build and publish to Dockerhub only if Tests passed (and ran!)
  # aka: Continuous Integration / Continuous Deployment
  # aka: guaranteed quality, everything published is tested
  # aka: 'required successful evaluation', 'accept only a Pass'
  # accepted criteria (any of which suffices): [PASS]
  # (applies only if the Docker Job is ON)
  # DOCKER_JOB_POLICY: "CI_CD"
  # Pol 2: Always run Docker build and publish to Dockerhub
  # aka 'Free Admittance', 'No Entry Barrier', 'Free Entrance'
  # DOCKER_JOB_POLICY: "ALWAYS_RUN"
  # Pol 3: Run Docker build and publish to Dockerhub if Tests ran and passed, but also if Tests were skipped
  # aka: Continuous Integration / Continuous Delivery
  # aka: 'accepted_when_Test_OFF', 'accept a Pass or a Skip'
  # accepted criteria (any of which suffices): [PASS, TESTS_SKIPPED]
  # DOCKER_JOB_POLICY: "RUN_ON_TEST_PASS_OR_SKIP"
  # WIP Pol 4: Never run Docker build and publish to Dockerhub
  # aka: guarantee that nothing is published
  # aka: 'Shutdown', 'No Admittance'
  # DOCKER_JOB_POLICY: "NEVER_RUN"
  #### End Behaviour ####
  ##### END Pipeline Settings ######
  ## Static Code Analysis Parameters ##
  # Python Runtime version to set the Job runner with
  STATIC_ANALYSIS_PY: "3.8"  # since our pyproject is tested to support 3.8 builds
  # Pylint Score Threshold; if pylint rates our code below this score, the Job fails
  PYLINT_SCORE_THRESHOLD: "8.2"
jobs:
  # we use the below Job to read the workflow 'env' section vars, so we can use them in "- if:" Job conditionals
  # now we can do -> if: ${{ needs.set_github_outputs.outputs.TESTS_ENABLED == 'true' }}
  # github does not offer a way to simply do "- if: ${{ env.RUN_UNIT_TESTS == 'true' }}" !!
  set_github_outputs:
    name: Read Workflow Env Section Vars and set Github Outputs
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.pass-env-to-output.outputs.matrix }}
      TESTS_ENABLED: ${{ steps.pass-env-to-output.outputs.TESTS_ENABLED }}
      # DOCKER_JOB_ON: ${{ steps.pass-env-to-output.outputs.DOCKER_JOB_ON }}
      DOCKER_POLICY: ${{ steps.pass-env-to-output.outputs.DOCKER_POLICY }}
      DRAW_DEPS_SVG_GRAPHS: ${{ steps.pass-env-to-output.outputs.DRAW_DEPS_SVG_GRAPHS }}
      RUN_LINT: ${{ steps.pass-env-to-output.outputs.RUN_LINT }}
      PUBLISH_ON_PYPI: ${{ steps.pass-env-to-output.outputs.PUBLISH_ON_PYPI }}
      PREVENT_CODECOV_TEST_COVERAGE: ${{ steps.pass-env-to-output.outputs.PREVENT_CODECOV_TEST_COVERAGE }}
    steps:
      - name: Pass 'env' section variables to GITHUB_OUTPUT
        id: pass-env-to-output
        run: |
          # set the matrix strategy to Full Matrix Stress Test if on master/main or stress-test branch or any tag
          BRANCH_NAME=${GITHUB_REF_NAME}
          if [[ $BRANCH_NAME == "master" || $BRANCH_NAME == "main" || $BRANCH_NAME == "stress-test" || $GITHUB_REF == refs/tags/* ]]; then
            echo "matrix=$FULL_MATRIX_STRATEGY" >> $GITHUB_OUTPUT
          else
            echo "matrix=$UBUNTU_PY38_STRATEGY" >> $GITHUB_OUTPUT
          fi
          echo "DRAW_DEPS_SVG_GRAPHS=$DRAW_DEPENDENCIES" >> $GITHUB_OUTPUT
          echo "RUN_LINT=$RUN_LINT_CHECKS" >> $GITHUB_OUTPUT
          echo "TESTS_ENABLED=$RUN_UNIT_TESTS" >> $GITHUB_OUTPUT
          echo "PUBLISH_ON_PYPI=$PUBLISH_ON_PYPI" >> $GITHUB_OUTPUT
          echo "PREVENT_CODECOV_TEST_COVERAGE=$PREVENT_CODECOV_TEST_COVERAGE" >> $GITHUB_OUTPUT
          ## Lower level config ##
          # 2 bit state machine
          # 0 0 = 0: pure CI/CD mode, aka Admit if Pass, Require Pass, guarantee quality
          # 0 1 = 1: Always build and publish, aka Admit All
          # 1 0 = 2: CI/CD with Bypass Opt, aka Admit Tested, and Admit when Test is OFF
          # 1 1 = 3: Never build and publish, aka No Admittance, guarantee NO Dockerhub publish
          if [[ $DOCKER_JOB_ON == "true" ]]; then
            # the ALWAYS_BUILD_N_PUBLISH_DOCKER switch overrides the policy
            if [[ $ALWAYS_BUILD_N_PUBLISH_DOCKER == "true" ]]; then
              echo "DOCKER_POLICY=1" >> $GITHUB_OUTPUT
            elif [[ $DOCKER_JOB_POLICY == "CDeployment" ]]; then
              echo "DOCKER_POLICY=0" >> $GITHUB_OUTPUT
            elif [[ $DOCKER_JOB_POLICY == "CDelivery" ]]; then
              echo "DOCKER_POLICY=2" >> $GITHUB_OUTPUT
            fi
          else
            echo "DOCKER_POLICY=3" >> $GITHUB_OUTPUT
          fi
  # RUN TEST SUITE ON ALL PLATFORMS
  test_suite:
    runs-on: ${{ matrix.platform }}
    needs: set_github_outputs
    if: ${{ needs.set_github_outputs.outputs.TESTS_ENABLED == 'true' }}
    strategy:
      matrix: ${{ fromJSON(needs.set_github_outputs.outputs.matrix) }}
    outputs:
      SEMVER_PIP_FORMAT: ${{ steps.parse_version.outputs.SEMVER_PIP_FORMAT }}
    steps:
      - run: echo "[INFO] Platform/OS ${{ matrix.platform }} , Python -> ${{ matrix.python-version }}"
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - run: python -m pip install --upgrade pip && python -m pip install tox==3.28 tox-gh-actions
      # - name: Do Type Checking
      #   run: tox -e type -vv -s false
      - name: Parse package version from __init__.py to assist building
        shell: bash
        id: parse_version
        run: |
          # The Developer must have specified the necessary information in their pyproject.toml:
          # - pyproject.toml MUST have the `[tool.software-release]` section.
          # - The Section MUST have the `version_variable` key
          # - The Key value MUST follow the format: "<path_to_file_with_sem_ver_value>:<symbol_name>"
          # - <path_to_file_with_sem_ver_value> MUST be relative to pyproject.toml
          # - The File MUST have a row starting with the <symbol_name>, followed by a "=" and a String Value
          # - The String Value MUST be quoted, with either double (") or single (') quotes
          # - It MUST match the Reg Ex: \s*=\s*[\'\"]([^\'\"]*)[\'\"]
          # Use Case: Single Source of Truth for Code Sem Ver
          # A Python Dev wants a Single Source of Truth for their Software's Semantic Version, to benefit from:
          # - Having only one "thing" to maintain, rather than many, reducing work, semantic load, and probability of human error
          # We assume the Dev opts for storing and maintaining this information inside a .py file (which belongs to their source distribution)
          # We assume they simply define a python (str) variable, in global scope, and assign the Sem Ver string directly.
          # Eg: __version__ = "0.0.1", or __version__ = '1.0.1-dev'
          # That way they can also read the Sem Ver of the software at runtime, by importing the value, since it is inside a python file
          ## EXAMPLE Valid Setup ##
          ## File pyproject.toml:
          # [tool.software-release]
          # version_variable = "src/artificial_artwork/__version__.py:__version__"
          ## File src/artificial_artwork/__version__.py:
          # __version__ = "0.0.1"  # or __version__ = '1.0.1-dev'
          ## EXAMPLE END ##
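          # For illustration only, a minimal (hypothetical) python equivalent of the parse,
          # hard-coding the EXAMPLE setup above; the real logic lives in the Parser script below:
          #   import re
          #   text = open("src/artificial_artwork/__version__.py").read()
          #   print(re.search(r"__version__\s*=\s*['\"]([^'\"]*)['\"]", text).group(1))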
          # The below Parser requires all the above MUST Conditions to be met, and will fail if they are not.
          PARSER="scripts/parse_version.py"
          PARSED_VERSION=$(python "${PARSER}")
          echo "==== Version Parsed: ${PARSED_VERSION} ===="
          echo "[INFO] String: ${PARSED_VERSION} parsed"
          echo "[DEBUG] We expect it to be Sem Ver 2.0 compliant"
          # handle cases where the user's sem ver has prerelease info
          # these cases result in artifacts (ie products of doing an 'sdist' or 'wheel' build)
          # having names (in the filesystem) that are slightly different than the ones our tool chain
          # produces when the sem ver is strictly only Major.Minor.Patch
          # ie if the user supplied sem ver 1.7.4-rc.1, building a wheel produces a file with '1.7.4rc1' in its name
          # ie if the user supplied sem ver 1.7.4-dev, building a wheel produces a file with '1.7.4.dev0' in its name
          # manually append the 0 to index the release candidate
          # we account for wheel building that automatically does the above
          # OPT 1, performs input validation and shows rich output and troubleshooting messages, in case of error
          PROCESS_SEM_VER="scripts/process_sem_ver.py"
          WHEEL_VERSION=$(python "${PROCESS_SEM_VER}" "${PARSED_VERSION}")
          # OPT 2, the smaller the better
          # WHEEL_VERSION=$(python -c 'import sys; s = sys.argv[1]; print( s if "-" not in sys.argv[1] else s.replace("-", ".") + "0" ); ' "${PARSED_VERSION}")
          # OPT 3, the smallest, using shell as the entrypoint, but least readable
          # WHEEL_VERSION=$(echo $PARSED_VERSION | sed -E 's/([^.]*)\.([^.]*)\.([^-]*)-(rc)\.?(.*)/\1.\2.\3\4\5/')
          # last_two=${WHEEL_VERSION: -2}
          # if [[ $last_two == "rc" ]]; then
          #   WHEEL_VERSION="${WHEEL_VERSION}0"
          # fi
          echo "==== Distribution Version $WHEEL_VERSION derived from $PARSED_VERSION ===="
          if [[ -z "$WHEEL_VERSION" ]]; then
            echo "[ERROR] Failed to derive Distribution Version from $PARSED_VERSION"
            exit 1
          fi
          # WHEEL_VERSION is required by:
          # - the 'twine' tool, running in a Step below, to locate the tar.gz for testing
          # - the deploy command, to locate the tar.gz and wheel file(s) to publish to pypi
          # to be used in the next step
          echo "PKG_VERSION=$WHEEL_VERSION" >> $GITHUB_ENV
          echo "SEMVER_PIP_FORMAT=$WHEEL_VERSION" >> $GITHUB_OUTPUT  # to be used in other jobs
      ## TEST SUITE: By Default executes only unit-tests (ie no integration, or network-dependent tests)
      - name: Run Unit Tests
        run: tox -vv -s false
        env:
          PLATFORM: ${{ matrix.platform }}
      # if sdist tests ran, then we expect a file to have been created:
      # .tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}.tar.gz
      # if wheel tests ran, then we expect a file similar to the below to
      # have been created. the file depends on the python version
      # compiled (cpu architecture specific code)
      # the below example is what to expect from a 'pure python' build
      # (meaning in theory there is no machine/cpu-specific code, no byte code,
      # no compiled code):
      # .tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}-py3-none-any.whl
- name: "Aggregate Code Coverage & make XML Reports" | |
id: produce_coverage_xml_file | |
env: | |
# just "destructure" (aka extract) needed values from the matrix, to use in step code | |
PLATFORM: ${{ matrix.platform }} | |
PY_VERSION: ${{ matrix.python-version }} | |
run: | | |
tox -e coverage --sitepackages -vv -s false | |
RUNNER_COVERAGE_XML_FILE_PATH="coverage-${PLATFORM}-${PY_VERSION}.xml" | |
mv ./.tox/coverage.xml "${RUNNER_COVERAGE_XML_FILE_PATH}" | |
# leverages ./scripts/post-tests-run.sh which returns the path of the XML Aggregated Coverage DataXML Filecoverage report | |
# chmod +x ./scripts/post-tests-run.sh | |
# RUNNER_COVERAGE_XML_FILE_PATH=$(./scripts/post-tests-run.sh "${PLATFORM}-${PY_VERSION}") | |
echo "CI_COVERAGE_XML=$RUNNER_COVERAGE_XML_FILE_PATH" >> $GITHUB_OUTPUT | |
echo "CI_COVERAGE_XML_THIS=$RUNNER_COVERAGE_XML_FILE_PATH" >> $GITHUB_ENV | |
- name: "Upload Test Coverage as Artifacts" | |
uses: actions/upload-artifact@v3 | |
with: | |
name: all_coverage_raw | |
path: ${{ env.CI_COVERAGE_XML_THIS }} | |
# steps.produce_coverage_xml_file.outputs.retval | |
# path: coverage-${{ matrix.platform }}-${{ matrix.python-version }}.xml | |
if-no-files-found: error | |
      - name: Check for compliance with Python Best Practices
        shell: bash
        run: |
          DIST_DIR=dist
          echo "DIST_DIR=dist" >> $GITHUB_ENV  # can be used in a 'with' body of a next step in the Job, as eg: path: ${{ env.DIST_DIR }}
          mkdir ${DIST_DIR}
          TOXENV_DIST_DIR=".tox/${DIST_DIR}"
          # Sanity check of the files inside
          echo "[DEBUG] ls -la ${TOXENV_DIST_DIR}"
          ls -la ${TOXENV_DIST_DIR}
          # Sanity check that among all files there is only one tar.gz and one .whl
          # if not, Fail the Job (aka exit with 1)
          # if [[ $(find ${TOXENV_DIST_DIR} -type f -name "*.tar.gz" | wc -l) -ne 1 ]]; then
          #   echo "ERROR: Expected to find 1 tar.gz file in ${TOXENV_DIST_DIR}, but found $(find ${TOXENV_DIST_DIR} -type f -name "*.tar.gz" | wc -l)"
          #   exit 1
          # fi
          # if [[ $(find ${TOXENV_DIST_DIR} -type f -name "*.whl" | wc -l) -ne 1 ]]; then
          #   echo "ERROR: Expected to find 1 .whl file in ${TOXENV_DIST_DIR}, but found $(find ${TOXENV_DIST_DIR} -type f -name "*.whl" | wc -l)"
          #   exit 1
          # fi
          # REQUIRES an existing 'Source Distribution' (SD), aka .tar.gz file, already built from the source code
          # ie previous code/step ran a tox env, such as with cmd `tox -e py38-sdist-linux`, or any `sdist` tox env,
          # which runs Unit Tests against the .tar.gz (sdist installation), after building it from source code
          # move .tar.gz
          mv ".tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}.tar.gz" "${DIST_DIR}"
          # move .whl
          # mv ".tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}-py3-none-any.whl" "${DIST_DIR}"
          # tox -e check -vv -s false
      # - name: Install documentation test dependencies
      #   if: ${{ matrix.platform == 'macos-latest' && matrix.python-version != '3.6' }}
      #   run: brew install enchant
      # - name: Run Documentation Tests
      #   if: ${{ matrix.platform == 'ubuntu-latest' || matrix.python-version != '3.6' }}
      #   run: tox -e docs --sitepackages -vv -s false
      - name: Upload Source & Wheel distributions as Artefacts
        uses: actions/upload-artifact@v3
        with:
          name: ${{ env.DIST_DIR }}-${{ matrix.platform }}-${{ matrix.python-version }}
          path: ${{ env.DIST_DIR }}
          if-no-files-found: error
  ### JOB: UPLOAD COVERAGE REPORTS TO CODECOV ###
  codecov_coverage_host:
    runs-on: ubuntu-latest
    needs: [set_github_outputs, test_suite]  # set_github_outputs must be listed in 'needs' for its outputs to be readable in the 'if' below
    if: ${{ needs.set_github_outputs.outputs.PREVENT_CODECOV_TEST_COVERAGE == 'false' }}
    steps:
      - uses: actions/checkout@v3
      - name: Get Codecov binary
        run: |
          curl -Os https://uploader.codecov.io/latest/linux/codecov
          chmod +x codecov
      - name: Download XML Test Coverage Results, from CI Artifacts
        uses: actions/download-artifact@v3
        with:
          name: all_coverage_raw
      - name: Upload Coverage Reports to Codecov
        run: |
          for file in coverage*.xml; do
            OS_NAME=$(echo "$file" | sed -E 's/coverage-(.+)-([0-9]+\.[0-9]+)\.xml/\1/')
            PY_VERSION=$(echo "$file" | sed -E 's/coverage-(.+)-([0-9]+\.[0-9]+)\.xml/\2/')
            ./codecov -f "$file" -e "OS=$OS_NAME,PYTHON=$PY_VERSION" --flags unittests --verbose
            echo "Sent to Codecov: $file !"
          done
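          # worked example: the file 'coverage-ubuntu-latest-3.9.xml' (as produced by the
          # test_suite Job above) yields OS_NAME=ubuntu-latest and PY_VERSION=3.9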
  ## DEPLOY to PYPI: PROD and STAGING
  # Automated Upload of Builds (.tar.gz, .whl), triggered by git push (aka git ops)
  # Deployment happens only IF
  # - PUBLISH_ON_PYPI == 'true'
  # - we are on the 'master' or 'release' branch
  # - the pushed git ref is a tag starting with 'v', ie v1.0.0
  # For Production deployment we use the public pypi.org server:
  #  be on the master branch, when you push a tag
  # For Staging deployment we use the test.pypi.org server:
  #  be on the release branch, when you push a tag
  # - first make sure PUBLISH_ON_PYPI = true in the Workflow test.yaml file
  # To automatically build your source code and deploy/upload it to PyPI, so that it becomes pip installable:
  # - push a git tag that starts with 'v' (eg: v1.0.0, v1.2.3-dev, etc) to trigger the automated deployment
  # PUBLISH DISTRIBUTIONS ON PYPI
  # 1st Job
  #  Check if the PUBLISH_ON_PYPI switch is ON, else do not attempt to deploy to PyPI
  #  Check if the git ref is a tag, which starts with 'v', and that tag is on the master or release branch,
  #  else Signal that we 'do not handle such cases' (eg we want to avoid accidental automated deployments)
  # 2nd Job
  #  If the Signal indicates that conditions are met for an Automated Deployment
  #  ...
  check_which_git_branch_we_are_on:
    runs-on: ubuntu-latest
    needs: set_github_outputs
    if: ${{ startsWith(github.event.ref, 'refs/tags/v') && needs.set_github_outputs.outputs.PUBLISH_ON_PYPI == 'true' }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: rickstaa/action-contains-tag@v1
        id: main_contains_tag
        with:
          reference: "master"
          tag: "${{ github.ref }}"
      - run: echo "ON_MAIN_BRANCH=${{ steps.main_contains_tag.outputs.retval }}" >> $GITHUB_OUTPUT
      - uses: rickstaa/action-contains-tag@v1
        id: release_contains_tag
        with:
          reference: "release"
          tag: "${{ github.ref }}"
      - run: echo "ON_RELEASE_BRANCH=${{ steps.release_contains_tag.outputs.retval }}" >> $GITHUB_OUTPUT
      - name: Pick Production or Test Environment, if tag on master or release branch respectively
        id: set_environment_name
        run: |
          DEPLOY=true
          if [[ "${{ steps.main_contains_tag.outputs.retval }}" == "true" ]]; then
            # Github Environment designed for Deploying to Production: DEPLOYMENT_PYPI_PROD
            ENVIRONMENT_NAME=DEPLOYMENT_PYPI_PROD
          elif [[ "${{ steps.release_contains_tag.outputs.retval }}" == "true" ]]; then
            # Github Environment designed for Deploying to Staging: DEPLOYMENT_PYPI_STAGING
            ENVIRONMENT_NAME=DEPLOYMENT_PYPI_STAGING
          else
            echo "A tag was pushed but not on the master or release branch. No deployment will be done."
            echo "[DEBUG] Branch name: ${GITHUB_REF_NAME}"
            echo "[DEBUG] ${{ github.ref }}"
            DEPLOY=false
          fi
          echo "SELECTED_ENVIRONMENT=$ENVIRONMENT_NAME" >> $GITHUB_ENV
          echo "AUTOMATED_DEPLOY=$DEPLOY" >> $GITHUB_ENV
      - run: echo "[DEBUG] Branch name ${GITHUB_REF_NAME}"
      - run: echo " ==== ${SELECTED_ENVIRONMENT} ===="
      - run: echo " ==== ${AUTOMATED_DEPLOY} ===="
      - name: Set Output for other Jobs to receive
        id: set_job_outputs
        run: |
          echo " --- Selected ENV NAME: $SELECTED_ENVIRONMENT "
          echo "ENVIRONMENT_NAME=${SELECTED_ENVIRONMENT}" >> $GITHUB_OUTPUT
          echo " --- Signal OK for Automated Deployment?: $AUTOMATED_DEPLOY "
          echo "AUTOMATED_DEPLOY=${AUTOMATED_DEPLOY}" >> $GITHUB_OUTPUT
    outputs:
      ENVIRONMENT_NAME: ${{ steps.set_job_outputs.outputs.ENVIRONMENT_NAME }}
      AUTOMATED_DEPLOY: ${{ steps.set_job_outputs.outputs.AUTOMATED_DEPLOY }}
  ### PYPI UPLOAD JOB ###
  pypi_publish:
    needs: [test_suite, check_which_git_branch_we_are_on]
    # needs: check_which_git_branch_we_are_on
    runs-on: ubuntu-latest
    # run only if we are on a tag starting with "v" and on the master or release branch
    if: ${{ startsWith(github.event.ref, 'refs/tags/v') && needs.check_which_git_branch_we_are_on.outputs.AUTOMATED_DEPLOY == 'true' }}
    environment:
      name: ${{ needs.check_which_git_branch_we_are_on.outputs.ENVIRONMENT_NAME }}
    env:
      DIST_DIR: dist
      DISTRO_SEMVER: ${{ needs.test_suite.outputs.SEMVER_PIP_FORMAT }}  # env var required by the deploy script (tox -e deploy)
      TWINE_USERNAME: ${{ vars.TWINE_USERNAME }}
      TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
      PYPI_SERVER: ${{ vars.PYPI_SERVER }}
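      # TWINE_* and PYPI_SERVER come from the GitHub Environment selected above; the
      # tox 'deploy' env (see the 'Publish to PyPI' step) is assumed to read PYPI_SERVER
      # to target either pypi.org (production) or test.pypi.org (staging)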
    steps:
      - uses: actions/checkout@v3
      - name: Download Source & Wheel distributions
        uses: actions/download-artifact@v3
        with:
          path: downloaded-artifacts
      - name: Get Publishable files from the Artifacts
        run: |
          TAG="${GITHUB_REF_NAME}"
          TAG_SEM_VER="${TAG:1}"  # remove the first character (v)
          # Make sure the distro semver ($DISTRO_SEMVER), which is identical
          # to the version string in the source code when its format is
          # Major.Minor.Patch (and slightly different when its format is M.m.P-prerelease),
          # is similarly either identical to, or slightly different from,
          # the git tag ($TAG_SEM_VER).
          # For now, only the Major.Minor.Patch part MUST match.
          # Extract the Major.Minor.Patch parts from DISTRO_SEMVER and TAG_SEM_VER
          DISTRO_MMP=$(echo "$DISTRO_SEMVER" | grep -oE '^[0-9]+\.[0-9]+\.[0-9]+')
          TAG_MMP=$(echo "$TAG_SEM_VER" | grep -oE '^[0-9]+\.[0-9]+\.[0-9]+')
          if [ "$DISTRO_MMP" = "$TAG_MMP" ]; then
            echo "Major.Minor.Patch part of DISTRO_SEMVER matches TAG_SEM_VER"
          else
            echo "[ERROR] Major.Minor.Patch part of DISTRO_SEMVER does not match TAG_SEM_VER"
            echo "DISTRO_SEMVER=$DISTRO_SEMVER"
            echo "TAG_SEM_VER=$TAG_SEM_VER"
            echo "DISTRO_MMP=$DISTRO_MMP"
            echo "TAG_MMP=$TAG_MMP"
            exit 1
          fi
          echo "PACKAGE_DIST_VERSION=$DISTRO_SEMVER" >> $GITHUB_ENV
      - run: mkdir ${DIST_DIR}
      - run: |
          # Get the Source Distribution (tar.gz of the source code)
          source_distributions=$(find downloaded-artifacts -type f -name 'artificial_artwork*.tar.gz')
          source_distributions_array=($source_distributions)
          source_distribution=${source_distributions_array[0]}  # a *.tar.gz file path
          # Extract the base name (without the path)
          source_distribution_name=$(basename "$source_distribution")
          # Check that all source distribution (.tar.gz) files have the same name
          for file in "${source_distributions_array[@]}"; do
            if [ "$source_distribution_name" != "$(basename "$file")" ]; then
              echo "Error: Not all Source Distribution .tar.gz files have the same name!"
              exit 1
            fi
          done
          echo " -- Will Upload : $source_distribution_name -- "
          echo "source_distribution=$source_distribution" >> $GITHUB_ENV
          cp "$source_distribution" ${DIST_DIR}
      - run: |
          # Get all built Wheels and copy them to the dist folder
          for f in $(find downloaded-artifacts -type f -name 'artificial_artwork*.whl'); do
            echo "F: $f";
            # TODO check for duplicates, which would mean that our build matrix produces the same wheel (we would need a compiler other than python, such as pypy, cython, etc)
            cp "$f" ${DIST_DIR}
          done
      - name: Install Dependencies
        run: pip install tox==3.28
      - run: echo "Publishing $PACKAGE_DIST_VERSION to $PYPI_SERVER PyPI"
      - name: Publish to PyPI
        # env:
        #   TWINE_USERNAME: ${{ vars.TWINE_USERNAME }}
        #   TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
        #   PYPI_SERVER: ${{ vars.PYPI_SERVER }}
        run: tox -vv -s false -e deploy
        # run: tox -vv -s false -e deploy -- upload --non-interactive --skip-existing
        # run: twine upload --non-interactive --skip-existing --repository testpypi ./dist/PY_PACKAGE-:Version.tar.gz --verbose
      - run: echo "Published :)"
  ## AUTOMATED DOCKER BUILD and PUBLISH ON DOCKERHUB ##
  docker_build:
    runs-on: ubuntu-latest
    needs: [set_github_outputs, test_suite]
    # Implements the DOCKER_POLICY state machine; '!cancelled()' keeps this Job
    # eligible to run even when test_suite was skipped (needed for the CDelivery policy)
    if: ${{ !cancelled() && (
        needs.set_github_outputs.outputs.DOCKER_POLICY == '1' || (
          needs.set_github_outputs.outputs.DOCKER_POLICY == '0' && needs.test_suite.result == 'success' ) || (
          needs.set_github_outputs.outputs.DOCKER_POLICY == '2' && (
            needs.test_suite.result == 'success' || needs.test_suite.result == 'skipped' ))
      ) }}
    env:
      DOCKER_USER: ${{ secrets.DOCKER_USER }}
    steps:
      - uses: actions/checkout@v3
      - name: Build Docker Image for DockerHub
        run: |
          # the workflow is enabled for branches and v* tags
          IMAGE_TAG="${GITHUB_REF_NAME}"  # this is the branch name or tag name
          IMAGE_REF="neural-style-transfer-cli:${IMAGE_TAG}"
          echo "IMAGE_REF=${IMAGE_REF}" >> $GITHUB_ENV
          # build an image that does not have the vgg model weights
          # TODO: add the image model weights file in this env somehow
          docker build --target prod_install -t "${DOCKER_USER}/${IMAGE_REF}" .
      - name: Publish Docker Image to DockerHub
        env:
          DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
        run: |
          echo "${DOCKER_PASSWORD}" | docker login --username "${DOCKER_USER}" --password-stdin
          docker push "${DOCKER_USER}/${IMAGE_REF}"
          docker logout
          echo "Published in Dockerhub :)"
  # STATIC CODE ANALYSIS & LINTING
  lint:
    name: "Static Code Analysis & Lint"
    runs-on: ubuntu-latest
    needs: set_github_outputs
    if: ${{ needs.set_github_outputs.outputs.RUN_LINT == 'true' }}
    steps:
      - uses: actions/checkout@v3
      - name: Read Pipe Parameter STATIC_ANALYSIS_PY, to determine the Python runtime to use for Static Code Analysis
        run: echo "MY_STATIC_ANALYSIS_PY_VERSION=$STATIC_ANALYSIS_PY" >> $GITHUB_ENV  # can be used in a 'with' body of a next step in the Job, as eg: python-version: ${{ env.MY_STATIC_ANALYSIS_PY_VERSION }}
      - name: Set up Python ${{ env.STATIC_ANALYSIS_PY }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.STATIC_ANALYSIS_PY }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install tox==3.28
      ## Isort ##
      - name: "Isort: Require Semantic and Alphabetic order of the Python Imports"
        run: tox -e isort -vv -s false
      ## Black ##
      - name: "Black: Require the Project Style to be followed by the Python Code"
        run: tox -e black -vv -s false
      ## Pylint ##
      - name: Run the Pylint tool on the Python Code Base
        run: TOXPYTHON="python${STATIC_ANALYSIS_PY}" tox -e pylint -vv -s false | tee pylint-result.txt
      - name: Show Pylint output in Terminal
        run: cat pylint-result.txt
- name: "Accept Code if Pylint Score > 8.2/10" | |
if: ${{ matrix.platform != 'windows-latest' }} | |
run: | | |
SCORE=`sed -n 's/^Your code has been rated at \([-0-9.]*\)\/.*/\1/p' pylint-result.txt` | |
echo "SCORE -> $SCORE" | |
# threshold check | |
if awk "BEGIN {exit !($SCORE >= $PYLINT_SCORE_THRESHOLD)}"; then | |
echo "PyLint Passed! | Score: ${SCORE} out of 10 | Threshold: ${PYLINT_SCORE_THRESHOLD}" | |
else | |
echo "PyLint Failed! | Score: ${SCORE} out of 10 | Threshold: ${PYLINT_SCORE_THRESHOLD}" | |
exit 1 | |
fi | |
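          # How the awk gate above works: 'exit !(SCORE >= THRESHOLD)' makes awk exit with
          # status 0 (success) exactly when the score clears the threshold, so the shell
          # 'if' takes the pass branch; otherwise the step exits 1 and the Job fails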
      ## Pyflakes, Pyroma, McCabe, DodgyRun, Profile Validator ##
      - name: Check for errors, potential problems, convention violations and complexity, by running the Pyflakes, Pyroma, McCabe, and DodgyRun tools
        run: tox -e prospector -vv -s false
  # DRAW PYTHON DEPENDENCY GRAPHS
  check_trigger_draw_dependency_graphs:
    runs-on: ubuntu-latest
    name: Draw Python Dependency Graphs ?
    needs: set_github_outputs
    if: needs.set_github_outputs.outputs.DRAW_DEPS_SVG_GRAPHS == 'true'
    outputs:
      SHOULD_DRAW_GRAPHS: ${{ steps.decide_if_should_draw_graphs.outputs.SHOULD_DRAW_GRAPHS }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Decide if we should draw graphs
        id: decide_if_should_draw_graphs
        run: |
          # if the branch is master or dev, or if we are on a tag starting with "v"
          if [[ ${GITHUB_REF_NAME} == "master" || ${GITHUB_REF_NAME} == "dev" || "${GITHUB_REF}" =~ refs/tags/v.* ]]; then
            SHOULD_DRAW_GRAPHS=true
          else
            echo "=============== list modified files ==============="
            git diff --name-only HEAD^ HEAD
            echo "========== check paths of modified files =========="
            git diff --name-only HEAD^ HEAD > files.txt
            SHOULD_DRAW_GRAPHS=false
            while read file; do
              echo $file
              if [[ $file =~ ^src/ ]]; then
                echo "This modified file is under the 'src' folder."
                SHOULD_DRAW_GRAPHS=true
                break
              fi
            done < files.txt
          fi
          echo "SHOULD_DRAW_GRAPHS=$SHOULD_DRAW_GRAPHS" >> $GITHUB_OUTPUT
  draw-dependencies:
    runs-on: ubuntu-latest
    needs: check_trigger_draw_dependency_graphs
    if: needs.check_trigger_draw_dependency_graphs.outputs.SHOULD_DRAW_GRAPHS == 'true'
    name: Draw Python Dependencies as Graphs, in .svg
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.8
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: Install tox
        run: |
          python -m pip install --upgrade pip
          python -m pip install tox==3.28
      - name: Install dependencies (ie the dot binary of graphviz)
        run: |
          sudo apt-get update -y --allow-releaseinfo-change
          sudo apt-get install -y graphviz
      - name: Draw Dependency Graphs as .svg files
        run: TOXPYTHON=python tox -e pydeps -vv -s false
      - name: Upload Dependency Graphs as artifacts
        uses: actions/upload-artifact@v3
        with:
          name: dependency-graphs
          path: pydeps/
          if-no-files-found: warn  # 'error' or 'ignore' are also available; defaults to `warn`