diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 4d49723..1171e3c 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -18,7 +18,7 @@ jobs: - name: Install dependencies run: | pip install --upgrade pip - pip install setuptools wheel twine + pip install setuptools wheel twine build if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Build and publish diff --git a/.gitignore b/.gitignore index 59a2da0..52d35d5 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,5 @@ output cache* lib .hypothesis -pycocoevalcap/tokenizer/tmp* \ No newline at end of file +pycocoevalcap/tokenizer/tmp* +.DS_Store \ No newline at end of file diff --git a/README.md b/README.md index 048cc54..0172cea 100644 --- a/README.md +++ b/README.md @@ -18,5 +18,4 @@ score = get_aces_score(candidates, references, average=True) ``` ## Evaluation -All the code that is used to evaluate different models for the research paper can be found in the `evaluation` folder. Particularly, -the model evaluation can be found in `evaluation/eval.py`, and information about the FENSE experiment can be found in `evaluation/fense_experiment/main.py`. +All the code that is used to evaluate different models for the research paper can be found in the `evaluation` folder on [GitHub](https://github.com/GlJS/ACES). Particularly, the model evaluation can be found in `evaluation/eval.py`, and information about the FENSE experiment can be found in `evaluation/fense_experiment/main.py`. 
diff --git a/evaluation/eval.py b/evaluation/eval.py index ccbe473..7c7d17b 100644 --- a/evaluation/eval.py +++ b/evaluation/eval.py @@ -12,7 +12,7 @@ import string from bert_score import score -from src.aces.aces import ACES, get_aces_score +from src.aces import ACES, get_aces_score from evaluation.eval_metrics import evaluate_metrics_from_lists from transformers import pipeline from fense.evaluator import Evaluator diff --git a/evaluation/fense_experiment/experiment/main.py b/evaluation/fense_experiment/experiment/main.py index c8e0f08..4cafcc1 100644 --- a/evaluation/fense_experiment/experiment/main.py +++ b/evaluation/fense_experiment/experiment/main.py @@ -2,7 +2,7 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "2,1" import sys sys.path.insert(0, os.getcwd()) -from src.aces.aces import get_aces_score, ACES +from src.aces import get_aces_score, ACES import json import numpy as np import pandas as pd diff --git a/pyproject.toml b/pyproject.toml index e91f351..9c80be9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,8 +9,4 @@ description = "ACES metric for evaluating automated audio captioning models base readme = "README.md" authors = [{name = "Gijs Wijngaard", email = "hi@gijs.me"}] license = {file = "LICENSE"} -requires-python = ">=3.9" - -[tool.setuptools] -include-package-data = true -packages = ["aces"] \ No newline at end of file +requires-python = ">=3.9" \ No newline at end of file diff --git a/__init__.py b/src/aces/__init__.py similarity index 100% rename from __init__.py rename to src/aces/__init__.py diff --git a/test_aces.py b/test_aces.py index e9a5b51..05695a5 100644 --- a/test_aces.py +++ b/test_aces.py @@ -3,7 +3,7 @@ import torch os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2" -from src.aces.aces import get_aces_score, ACES +from src.aces import get_aces_score, ACES from transformers import pipeline import time from evaluation.fense_experiment.experiment.sweep import get_aces_score as get_aces_score_old