diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 97407f9..5e915e6 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -56,7 +56,7 @@ jobs: # set the matrix strategy to Full Matrix Stress Test if on master/main or stress-test branch or any tag BRANCH_NAME=${GITHUB_REF_NAME} if [[ $BRANCH_NAME == "master" || $BRANCH_NAME == "main" || $BRANCH_NAME == "stress-test" || $GITHUB_REF == refs/tags/* ]]; then - echo "matrix=$FULL_MATRIX_STRATEGY" >> $GITHUB_OUTPUT + echo "matrix=$UBUNTU_PY38_STRATEGY" >> $GITHUB_OUTPUT else echo "matrix=$UBUNTU_PY38_STRATEGY" >> $GITHUB_OUTPUT fi @@ -75,8 +75,13 @@ jobs: matrix: ${{fromJSON(needs.set_github_outputs.outputs.matrix)}} outputs: SEMVER_PIP_FORMAT: ${{ steps.parse_version.outputs.SEMVER_PIP_FORMAT }} + # CI_COVERAGE_XML: ${{ steps.produce_coverage_xml_file.outputs.CI_COVERAGE_XML }} + # can be used in Other Jobs as for example + # needs: [test_suite] + # environment: + # name: ${{ needs.test_suite.outputs.CI_COVERAGE_XML }} steps: - - run: echo "Platform -> ${{ matrix.platform }} , Python -> ${{ matrix.python-version }}" + - run: echo "[INFO] Platform/OS ${{ matrix.platform }} , Python -> ${{ matrix.python-version }}" - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -102,69 +107,104 @@ jobs: WHEEL_VERSION="${WHEEL_VERSION}0" fi echo "==== $PARSED_VERSION --> $WHEEL_VERSION" - echo "PKG_VERSION=$WHEEL_VERSION" >> $GITHUB_ENV # to be used in the next step + # to be used in the next step + echo "PKG_VERSION=$WHEEL_VERSION" >> $GITHUB_ENV echo "SEMVER_PIP_FORMAT=$WHEEL_VERSION" >> $GITHUB_OUTPUT # to be used in other jobs + ## TEST SUITE: By Default executes only unit-tests (ie no integration, or network-dependent tests) - name: Run Unit Tests run: tox -vv -s false env: PLATFORM: ${{ matrix.platform }} - - name: "Combine Coverage (dev, sdist, wheel) & make Reports" - run: tox -e coverage --sitepackages -vv -s false + # 
if sdist tests ran, then we expect file to have been created + # .tox/${DIST_DIR}/cookiecutter_python-${PKG_VERSION}.tar.gz - - name: Rename Coverage Files - shell: bash + # if wheel tests ran, then we expect a file similar to below, to + # have been created. the file depends on the python version + # compiled (cpu architecture specific code) + + # the below example is what to expect from 'pure python' build ( + # meaning in theory there is no machine/cpu-specific code, no byte code, + # no compiled code + # .tox/${DIST_DIR}/cookiecutter_python-${PKG_VERSION}-py3-none-any.whl + + - name: "Aggregate Code Coverage & make XML Reports" + id: produce_coverage_xml_file + env: + # just "destructure" (aka extract) needed values from the matrix, to use in step code + PLATFORM: ${{ matrix.platform }} + PY_VERSION: ${{ matrix.python-version }} run: | - mv ./.tox/coverage.xml ./coverage-${{ matrix.platform }}-${{ matrix.python-version }}.xml + tox -e coverage --sitepackages -vv -s false + + RUNNER_COVERAGE_XML_FILE_PATH="coverage-${PLATFORM}-${PY_VERSION}.xml" + + mv ./.tox/coverage.xml "${RUNNER_COVERAGE_XML_FILE_PATH}" + + # leverages ./scripts/post-tests-run.sh which returns the path of the Aggregated Coverage Data XML File (coverage report) + # chmod +x ./scripts/post-tests-run.sh + # RUNNER_COVERAGE_XML_FILE_PATH=$(./scripts/post-tests-run.sh "${PLATFORM}-${PY_VERSION}") + + echo "CI_COVERAGE_XML=$RUNNER_COVERAGE_XML_FILE_PATH" >> $GITHUB_OUTPUT + echo "CI_COVERAGE_XML_THIS=$RUNNER_COVERAGE_XML_FILE_PATH" >> $GITHUB_ENV + - name: "Upload Test Coverage as Artifacts" uses: actions/upload-artifact@v3 with: name: all_coverage_raw - path: coverage-${{ matrix.platform }}-${{ matrix.python-version }}.xml + path: ${{ env.CI_COVERAGE_XML_THIS }} + # steps.produce_coverage_xml_file.outputs.retval + # path: coverage-${{ matrix.platform }}-${{ matrix.python-version }}.xml if-no-files-found: error - # - name: Check for compliance with Python Best Practices - # shell: bash - # run: | - 
# DIST_DIR=dist - # echo "DIST_DIR=dist" >> $GITHUB_ENV - # mkdir ${DIST_DIR} - # mv ".tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}.tar.gz" "${DIST_DIR}" - # mv ".tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}-py3-none-any.whl" "${DIST_DIR}" - # tox -e check -vv -s false + + - name: Check for compliance with Python Best Practices + shell: bash + run: | + DIST_DIR=dist + echo "DIST_DIR=dist" >> $GITHUB_ENV # can be used in a with body of a next step in the Job, as eg: path: ${{ env.DIST_DIR }} + mkdir ${DIST_DIR} + mv ".tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}.tar.gz" "${DIST_DIR}" + # mv ".tox/${DIST_DIR}/artificial_artwork-${PKG_VERSION}-py3-none-any.whl" "${DIST_DIR}" + tox -e check -vv -s false + # - name: Install documentation test dependencies # if: ${{ matrix.platform == 'macos-latest' && matrix.python-version != '3.6' }} # run: brew install enchant # - name: Run Documentation Tests # if: ${{ matrix.platform == 'ubuntu-latest' || matrix.python-version != '3.6' }} # run: tox -e docs --sitepackages -vv -s false - # - name: Upload Source & Wheel distributions as Artefacts - # uses: actions/upload-artifact@v3 - # with: - # name: dist-${{ matrix.platform }}-${{ matrix.python-version }} - # path: ${{ env.DIST_DIR }} - # if-no-files-found: error - # codecov_coverage_host: - # runs-on: ubuntu-latest - # needs: test_suite - # steps: - # - uses: actions/checkout@v3 - # - name: Get Codecov binary - # run: | - # curl -Os https://uploader.codecov.io/latest/linux/codecov - # chmod +x codecov - # - name: Download Raw Coverage Data Artefacts - # uses: actions/download-artifact@v3 - # with: - # name: all_coverage_raw - # - name: Upload Coverage Reports to Codecov - # run: | - # for file in coverage*.xml; do - # OS_NAME=$(echo $file | sed -E "s/coverage-(\w\+)-/\1/") - # PY_VERSION=$(echo $file | sed -E "s/coverage-\w\+-(\d\.)\+/\1/") - # ./codecov -f $file -e "OS=$OS_NAME,PYTHON=$PY_VERSION" --flags unittests --verbose - # echo "Sent to Codecov: $file !" 
- # done + - name: Upload Source & Wheel distributions as Artefacts + uses: actions/upload-artifact@v3 + with: + name: ${{ env.DIST_DIR }}-${{ matrix.platform }}-${{ matrix.python-version }} + path: ${{ env.DIST_DIR }} + if-no-files-found: error + + + codecov_coverage_host: + runs-on: ubuntu-latest + needs: test_suite + env: + PACKAGE_DIST_VERSION: ${{ needs.test_suite.outputs.SEMVER_PIP_FORMAT }} + steps: + - uses: actions/checkout@v3 + - name: Get Codecov binary + run: | + curl -Os https://uploader.codecov.io/latest/linux/codecov + chmod +x codecov + - name: Download Raw Coverage Data Artefacts + uses: actions/download-artifact@v3 + with: + name: all_coverage_raw + - name: Upload Coverage Reports to Codecov + run: | + for file in coverage*.xml; do + OS_NAME=$(echo $file | sed -E "s/coverage-(\w\+)-/\1/") + PY_VERSION=$(echo $file | sed -E "s/coverage-\w\+-(\d\.)\+/\1/") + ./codecov -f $file -e "OS=$OS_NAME,PYTHON=$PY_VERSION" --flags unittests --verbose + echo "Sent to Codecov: $file !" 
+ done # DOCKER BUILD AND PUBLISH ON DOCKERHUB docker_build: @@ -238,7 +278,7 @@ jobs: name: ${{ needs.check_which_git_branch_we_are_on.outputs.ENVIRONMENT_NAME }} env: DIST_DIR: dist - PACKAGE_DIST_VERSION: ${{ needs.test_suite.outputs.SEMVER_PIP_FORMAT }} + PACKAGE_DIST_VERSION: ${{ needs.test_suite.outputs.SEMVER_PIP_FORMAT }} # required env var by deploy script (tox -e deploy) TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }} TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} PYPI_SERVER: ${{ vars.PYPI_SERVER }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 3fe34ef..afab8fc 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,21 @@ # Changelog +## 1.0.1-dev (2023-10-31) + +Revive CI Pipeline + +### Changes + +##### documentation +- update badges refs and urls +- add CI Pipeline Status Badge in README +- show Demo Content Image + Style Image = Generated Image + +##### ci +- Upload Code Coverage Data to Codecov.io, resulted from Test Suite runs + + ## 1.0.0 (2023-10-29) - Prototype GUI Client diff --git a/README.rst b/README.rst index b5c57a7..46c67a0 100755 --- a/README.rst +++ b/README.rst @@ -4,6 +4,10 @@ Neural Style Transfer - CLI Create artificial artwork by transfering the appearance of one image (eg a famous painting) to another user-supplied image (eg your favourite photograph). +| |Demo_Content_Image| + |Demo_Style_Image| +| = +| |Demo_Gen_Image| + Uses a Neural Style Transfer algorithm to transfer the appearance, which you can run though a CLI program. `Neural Style Tranfer` (NST) is an algorithm that applies the `style` of an image to the `contents` of another and produces a `generated` image. @@ -13,14 +17,13 @@ NST takes a `content` image (eg picture taken with your camera) and a `style` im This Python package runs a Neural Style Tranfer algorithm on input `content` and `style` images to produce `generated` images. - .. start-badges .. 
list-table:: :stub-columns: 1 * - tests - - | |circleci| |codecov| + - | |ci_pipeline| |codecov| * - package - | |pypi| |wheel| |py_versions| |commits_since| @@ -29,7 +32,7 @@ This Python package runs a Neural Style Tranfer algorithm on input `content` and - | |docker| |image_size| * - code quality - - |better_code_hub| |codacy| |code_climate| |maintainability| |scrutinizer| + - |codacy| |code_climate| |maintainability| |scrutinizer| @@ -154,20 +157,24 @@ and the pretrained model are present. That way you can immediately start creatin docker run -it --rm -v $NST_OUTPUT:/nst-output boromir674/neural-style-transfer $STYLE $CONTENT --iteratins 200 --location /nst-output +.. |ci_pipeline| image:: https://img.shields.io/github/actions/workflow/status/boromir674/neural-style-transfer/test.yaml?branch=master&label=build&logo=github-actions&logoColor=233392FF + :alt: CI Pipeline Status + :target: https://github.com/boromir674/neural-style-transfer/actions?query=branch%3Amaster++ - +.. |github_actions_ci| image:: https://img.shields.io/github/actions/workflow/status/boromir674/neural-style-transfer/test.yaml?link=https%3A%2F%2Fgithub.com%2Fboromir674%2Fneural-style-transfer%2Factionsbranch=master + :alt: GitHub Workflow Status + :target: https://github.com/boromir674/neural-style-transfer/actions?query=branch%3Amaster++ .. |circleci| image:: https://img.shields.io/circleci/build/github/boromir674/neural-style-transfer/master?logo=circleci :alt: CircleCI :target: https://circleci.com/gh/boromir674/neural-style-transfer/tree/master -.. |codecov| image:: https://codecov.io/gh/boromir674/neural-style-transfer/branch/master/graph/badge.svg?token=3POTVNU0L4 +.. |codecov| image:: https://codecov.io/gh/boromir674/neural-style-transfer/branch/master/graph/badge.svg :alt: Codecov - :target: https://app.codecov.io/gh/boromir674/neural-style-transfer/branch/master + :target: https://app.codecov.io/gh/boromir674/neural-style-transfer/tree/master - .. 
|pypi| image:: https://img.shields.io/pypi/v/artificial-artwork?color=blue&label=pypi&logo=pypi&logoColor=%23849ed9 :alt: PyPI :target: https://pypi.org/project/artificial-artwork/ @@ -180,15 +187,14 @@ and the pretrained model are present. That way you can immediately start creatin :alt: PyPI - Python Version :target: https://pypi.org/project/artificial-artwork -.. |commits_since| image:: https://img.shields.io/github/commits-since/boromir674/neural-style-transfer/v1.0.0/master?color=blue&logo=Github - :alt: GitHub commits since tagged version (branch) - :target: https://github.com/boromir674/neural-style-transfer/compare/v1.0.0..master - +.. |version| image:: https://img.shields.io/pypi/v/artificial-artwork.svg + :alt: PyPI Package latest master + :target: https://pypi.org/project/artificial-artwork +.. |commits_since| image:: https://img.shields.io/github/commits-since/boromir674/neural-style-transfer/v1.0.1-dev/master?color=blue&logo=Github + :alt: GitHub commits since tagged version (branch) + :target: https://github.com/boromir674/neural-style-transfer/compare/v1.0.1-dev..master -.. |better_code_hub| image:: https://bettercodehub.com/edge/badge/boromir674/neural-style-transfer?branch=master - :alt: Better Code Hub - :target: https://bettercodehub.com/ .. |codacy| image:: https://app.codacy.com/project/badge/Grade/07b27ac547a94708aefc5e845d2b6d01 :alt: Codacy @@ -208,13 +214,7 @@ and the pretrained model are present. That way you can immediately start creatin -.. |version| image:: https://img.shields.io/pypi/v/topic-modeling-toolkit.svg - :alt: PyPI Package latest release - :target: https://pypi.org/project/topic-modeling-toolkit -.. |python_versions| image:: https://img.shields.io/pypi/pyversions/artificial-artwork.svg - :alt: Supported versions - :target: https://pypi.org/project/artificial-artwork/ @@ -223,4 +223,17 @@ and the pretrained model are present. 
That way you can immediately start creatin :target: https://hub.docker.com/r/boromir674/neural-style-transfer .. |image_size| image:: https://img.shields.io/docker/image-size/boromir674/neural-style-transfer/latest?logo=docker&logoColor=%23849ED9 - :alt: Docker Image Size (tag) \ No newline at end of file + :alt: Docker Image Size (tag) + + +.. |Demo_Content_Image| image:: ./tests/data/canoe_water_w300-h225.jpg + :width: 300 + :alt: Demo Content Image + +.. |Demo_Style_Image| image:: ./tests/data/blue-red_w300-h225.jpg + :width: 300 + :alt: Demo Style Image + +.. |Demo_Gen_Image| image:: ./tests/data/canoe_water_w300-h225.jpg+blue-red_w300-h225.jpg-100-demo-gui-run-1.png + :width: 300 + :alt: Gen Image diff --git a/pyproject.toml b/pyproject.toml index ddbe1d8..796c866 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "artificial-artwork" -version = "1.0.0" +version = "1.0.1-dev" description = "Create artificial artwork by transfering the appearance of one image (eg a famous painting) to another user-supplied image (eg your favourite photograph)." 
authors = ["Konstantinos Lampridis "] maintainers = ["Konstantinos Lampridis "] @@ -23,11 +23,12 @@ include = [ "CHANGELOG.rst", ] - homepage = "https://github.com/boromir674/neural-style-transfer" repository = "https://github.com/boromir674/neural-style-transfer" documentation = "https://neural-style-transfer.readthedocs.io/" +keywords = ["artificial intelligence", "neural style transfer", "artificial art", "deep learning", "cli", "gui"] + classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", diff --git a/scripts/post-tests-run.sh b/scripts/post-tests-run.sh new file mode 100644 index 0000000..6cf8497 --- /dev/null +++ b/scripts/post-tests-run.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# MUST run from within the Repo Root Directory +# DESIGNED to run after tests have been run + +# READ 1st Positional Arguments +destination_xml_file_path=$1 + + +# Script that creates a Coverage XML file, by Aggregating all discovered Coverage +# data found at runtime (ie generated from Test Suite Run(s), that produced a +# coverage file per Run). + +# This script should cover cases, where, for a given OS and Python version +# (ie os: Ubuntu, python: 3.10), the Test Suite ran against one or more +# 'python installation' modes (possible modes: 'in edit mode', or as +# sdist, as wheel). + +# Running this script after Test Suite ran against all possible modes +# (at least one), will + +# 1. Aggregate all coverage data files found in the 'coverage' directory + +# 2. Create a single Coverage XML file, that contains all coverage data + +# 3. 
Put the file into PWD directory and return the path to it + + +# gather individual coverage data produced by Test Suite +# runs against potentially multiple 'package' installations +# such as 'in edit mode', or as sdist (source distribution), +# or as wheel (potentially binary/compiled distribution) + +# We gather all that info and export 2 files with same info +# but in different format (xml, html) + +# Combine Coverage (ie dev, sdist, wheel) & make Reports (ie in xml, html) +# capture stdout stderr and exit code +# tox -e coverage --sitepackages -vv -s false +tox -e coverage --sitepackages -vv -s false 2>&1 | tee coverage-tox-run.log + +# get exit code of tox run +TOX_RUN_EXIT_CODE=$? + +# if tox run failed, exit with same exit code +if [ $TOX_RUN_EXIT_CODE -ne 0 ]; then + echo "[ERROR] Tox run failed with exit code: $TOX_RUN_EXIT_CODE" + exit $TOX_RUN_EXIT_CODE +fi + +# START - Rename Coverage Files (POC Version) +platform="linux" +python_version="3.8" + +# get coverage data file path +# try to copy coverage data file to destination, else print error and tox coverage run log +mv ./.tox/coverage.xml "${destination_xml_file_path}" + +if [ $? 
-ne 0 ]; then + echo "[ERROR] Failed to copy coverage data file to destination: ${destination_xml_file_path}" + echo "[DEBUG] Dumping tox -e coverage run output:" + cat coverage-tox-run.log + exit 1 +fi + +# END - Rename Coverage Files (POC Version) + +# Interface promises to return path to coverage xml file +echo "${destination_xml_file_path}" + +# Github Actions original code +# mv ./.tox/coverage.xml ./coverage-${{ matrix.platform }}-${{ matrix.python-version }}.xml diff --git a/src/artificial_artwork/__version__.py b/src/artificial_artwork/__version__.py index 1f356cc..f143cb0 100755 --- a/src/artificial_artwork/__version__.py +++ b/src/artificial_artwork/__version__.py @@ -1 +1 @@ -__version__ = '1.0.0' +__version__ = '1.0.1-dev' diff --git a/tests/test_layer_between_cli_cmd_demo_n_backend.py b/tests/test_layer_between_cli_cmd_demo_n_backend.py index 0356508..675802a 100644 --- a/tests/test_layer_between_cli_cmd_demo_n_backend.py +++ b/tests/test_layer_between_cli_cmd_demo_n_backend.py @@ -1,12 +1,17 @@ -# TEST that the _demo script (artificial_artwork._demo) which interfaces with + +# TEST that the _demo.py module (artificial_artwork._demo) which interfaces with # the 'cmd_demo' module (which defines the nst's 'demo' CLI subcommand) + def test_code_of_layer_bridging_demo_cli_cmd_and_backend( toy_nst_algorithm, + test_suite, + monkeypatch, ): + from pathlib import Path # GIVEN the module that implements the layer which bridges the CLI demo cmd # and the backend - from artificial_artwork._demo import create_algo_runner + from artificial_artwork import _demo # GIVEN a function that implements a way to mock/monkeypatch the bridge, so # that this test case is a unit-test and does not need to integrate with the @@ -14,8 +19,10 @@ def test_code_of_layer_bridging_demo_cli_cmd_and_backend( handler = toy_nst_algorithm() # monkey patch production pretrained weights # and return a handler designed to handle operations of toy model + monkeypatch.setattr(_demo, 
'source_root_dir', Path(test_suite) / '..') + # WHEN we execute the Layer-provided function that initializes the NST algo - backend_objs = create_algo_runner() + backend_objs = _demo.create_algo_runner() # THEN we are provided with a way to run/start the iterative algorithm, # which by now should be configured (ie define Computational Graph Architecture, diff --git a/tox.ini b/tox.ini index 0f9a365..118f5b7 100644 --- a/tox.ini +++ b/tox.ini @@ -1,8 +1,8 @@ [tox] envlist = ; {py311, py310, py39, py38}-{dev, sdist, wheel}-{linux, macos, windows} - {py311, py310, py39, py38}-{dev}-{linux, macos, windows} - coverage + {py311, py310, py39, py38}-{sdist}-{linux, macos, windows} + ; coverage isolated_build = true skip_missing_interpreters = false minversion = 3.14 @@ -51,7 +51,8 @@ commands = pytest -ra --cov --cov-report=term-missing \ --cov-report=html:{envdir}/htmlcov --cov-context=test \ --cov-report=xml:{toxworkdir}/coverage.{envname}.xml \ - {posargs:-n auto} tests + {posargs:-n auto} tests -k test_cli + ## CI oriented Envs integrating with GH Actions Tox Plugin ## @@ -60,6 +61,11 @@ commands = description = Install in 'edit' mode, Run Test Suite and gather Coverage usedevelop = true +# SDIST +[testenv:{py311, py310, py39, py38}-sdist{, -linux, -macos, -windows}] +description = Install as Source Distribution & Test +basepython = {env:TOXPYTHON:python3} + ## LOCAL DEV Oriented Envs ## # DEV -> Default command does not do Coverage @@ -194,65 +200,67 @@ commands = ## PYTHON PACKAGING +[testenv:build] +description = Create a source and wheel distribution. + Creates .tar.gz and .whl files in the {env:DIST_DIR} folder, that can be upload to a pypi index server. 
+basepython = {env:TOXPYTHON:python3.8} +deps = build +skip_install = true +changedir = {toxinidir} +commands_pre = + python -c 'import os; import shutil; d = "{env:DIST_DIR}"; exec("if os.path.exists(d):\n shutil.rmtree(d)");' +commands = python -m build {toxinidir} --outdir {env:DIST_DIR} + [testenv:check] description = Check the code for compliance with best practises of Python packaging ecosystem (PyPI, pip, Distribute, etc). +basepython = {env:TOXPYTHON:python3.8} deps = - docutils - readme-renderer - pygments - check-manifest + poetry-core pyroma + twine skip_install = true commands = - # we do NOT isolate the build, because otherwise the host system needs something like "apt install python3.8-venv" - check-manifest -v --no-build-isolation - pyroma -d {toxinidir} - - -[testenv:build] -description = Create/build the python package/distribution. - Creates .tar.gz and .whl files in the 'dist' folder, that can be upload to a pypi index server. -basepython = {env:TOXPYTHON:python3} -deps = - setuptools >= 40.0.0 -skip_install = true -commands_pre = - # Delete the 'dist' directory and its contents if found - python -c 'import os; import shutil; exec("if os.path.exists(os.path.join(\"{toxinidir}\", \"dist\")):\n shutil.rmtree(os.path.join(\"{toxinidir}\", \"dist\"))")' - # Create a setup.py file that simply invokes setuptools.setup without arguments (since all metadata required for building using setuptools should be present in non python files pyproject.toml and/or setup.cfg) - python -c 'import os; setup_py = os.path.join("{toxinidir}", "setup.py"); string = "from setuptools import setup\nsetup()"; exec("if not os.path.exists(setup_py):\n with open(setup_py, \"x\") as f:\n f.write(string)")' -commands = - python setup.py sdist bdist_wheel -commands_post = - # Delete the generated setup.py file - python -c 'import os; setup_py = os.path.join("{toxinidir}", "setup.py"); exec("if os.path.exists(setup_py):\n os.remove(setup_py)");' + pyroma --directory {toxinidir} + 
pyroma --file {env:DIST_DIR}/{env:PY_PACKAGE}-{env:PKG_VERSION}.tar.gz + python -m twine check {env:DIST_DIR}/{env:PY_PACKAGE}-{env:PKG_VERSION}* + # TODO Improvement run 'pyroma --pypi' from some script/CI server after uploading to test-pypi +depends = build +## DEPLOY PYPI [testenv:deploy] -description = Deploy the python package to be hosted in a PyPi server. Requires the NEURAL_STYLE_TRANSFER_RELEASE_VERSION +description = Deploy the python package to be hosted in a PyPI server. Requires the NEURAL_STYLE_TRANSFER_RELEASE_VERSION environment variable to contain the string that represents the semantic version (eg 0.5.3 or 1.0.0) under which to release the package to pypi. By default, deploys to the test-pypi server. If you want to deploy to the "production" pypi, then you have to set the PYPI_SERVER environment variable like `export PYPI_SERVER=pypi`. Also runs certain checks on the packaged distribution (.tar.gz and .whl) +basepython = {env:TOXPYTHON:python3} passenv = TWINE_* + NEURAL_STYLE_TRANSFER_RELEASE_VERSION + PYPI_SERVER deps = keyring==21.3.0 twine==3.4.0 skip_install = true commands_pre = - python -c 'import os; print("{env:PYPI_SERVER:NOT_FOUND}");' - ; python -c 'import os; n = "TWINE_USERNAME"; v = os.environ.get(n); exec("if not v:\n print(\"Please set the \" + str(n) + \" variable.\")\n exit(1)");' + # VERIFY TARGET PYPI SERVER is set correctly to a supported value + python -c 'import os; tw_pypi = os.environ.get("PYPI_SERVER", "testpypi"); exec("if tw_pypi not in {\"pypi\", \"testpypi\"}:\n print(f\"\\n[ERROR]: Requested to set Target PyPI server to \{tw_pypi\}, but supported alias are [pypi, testpypi], which correspond to setting Target PyPI to production pypi.org or staging test.pypi.org, respectively. Please leverage the PYPI_SERVER env var for indicating the Target PyPI server to deploy to. 
For production PyPI, use PYPI_SERVER=pypi, for test PyPI use PYPI_SERVER=testpypi or do not set the PYPI_SERVER env var at all.\\n\")\n exit(1)");' + + # PRINT MESSAGE to USER + python -c 'import os; tw_pypi = os.environ.get("PYPI_SERVER", "testpypi"); exec("if tw_pypi == \"pypi\":\n print(f\"\\nWill do a PRODUCTION Deployment to PyPI server at pypi.org\\n\")\nelse:\n print(f\"\\nWill do a STAGING Deployment to test PyPI server at test.pypi.org\\n\")");' + + # VERIFY NEURAL_STYLE_TRANSFER_RELEASE_VERSION is set + python -c 'import os; exec("if not os.environ.get(\"NEURAL_STYLE_TRANSFER_RELEASE_VERSION\"):\n print(f\"\\n[ERROR]: Requested to deploy to PyPI, but the NEURAL_STYLE_TRANSFER_RELEASE_VERSION env var is not set.\\n\")\n exit(1)");' + + python -c 'import os; n = "TWINE_USERNAME"; v = os.environ.get(n); exec("if not v:\n print(\"Please set the \" + str(n) + \" variable.\")\n exit(1)");' python -c 'import os; n = "TWINE_PASSWORD"; v = os.environ.get(n); exec("if not v:\n print(\"Please set the \" + str(n) + \" variable.\")\n exit(1)");' - ; python -c 'import os; n = "TWINE_PASSWORD"; v = os.environ.get(n); exec("if not v:\n print(\"Please set the \" + str(n) + \" variable.\")\n exit(1)");' + # check whether the distribution’s long description will render correctly on PyPI ; twine check dist/artificial[\-_]artwork-{env:NEURAL_STYLE_TRANSFER_RELEASE_VERSION:PLEASE_INDICATE_THE_SEM_VER_FOR_RELEASE}* - python -c 'import os; os.environ["TWINE_USERNAME"] = "OVERRIDE_USERNAME";print("USERNAME: " + os.environ.get("TWINE_USERNAME") + " --> PASS !")' - python -c 'import os; print("USERNAME: " + os.environ.get("TWINE_USERNAME") + " --> PASS !");' commands = - python -c 'import os; print("USERNAME: " + os.environ.get("TWINE_USERNAME") + " --> PASS !");' - ; twine {posargs:upload --non-interactive} --repository {env:PYPI_SERVER:testpypi --skip-existing} dist/artificial[\-_]artwork-{env:NEURAL_STYLE_TRANSFER_RELEASE_VERSION:PLEASE_INDICATE_THE_SEM_VER_FOR_RELEASE}* 
--verbose + python -m twine {posargs:upload --non-interactive} --repository {env:PYPI_SERVER:testpypi --skip-existing} dist/artificial[\-_]artwork-{env:NEURAL_STYLE_TRANSFER_RELEASE_VERSION:PLEASE_INDICATE_THE_SEM_VER_FOR_RELEASE}* --verbose ; commands_post = ; pip install --index-url https://test.pypi.org/simple/ --no-deps {env:PY_PACKAGE}=={env:NEURAL_STYLE_TRANSFER_RELEASE_VERSION}