Fix: Installation on Windows by using aac-metrics download to compare aac-metrics with CET.
Labbeti committed Dec 7, 2023
1 parent c3002df commit 7a4ed30
Showing 3 changed files with 11 additions and 2 deletions.
1 change: 1 addition & 0 deletions .gitmodules
@@ -1,6 +1,7 @@
 [submodule "caption-evaluation-tools"]
 	path = tests/caption-evaluation-tools
 	url = https://github.com/audio-captioning/caption-evaluation-tools
+	ignore = dirty
 	branch = master
 
 [submodule "fense"]
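The new "ignore = dirty" entry tells git not to report working-tree changes inside the caption-evaluation-tools submodule, presumably because the SPICE files downloaded by the updated test end up inside that checkout and would otherwise leave it permanently dirty. A minimal sketch of checking this from Python, assuming a clone with the submodule initialized and an unchanged submodule commit (the subprocess call is only for illustration):

import subprocess

# With ignore = dirty set in .gitmodules, extra or modified files inside
# tests/caption-evaluation-tools no longer make the parent repository report
# the submodule as modified (as long as its checked-out commit is unchanged).
out = subprocess.run(
    ["git", "status", "--porcelain"],
    capture_output=True,
    text=True,
    check=True,
).stdout
assert "tests/caption-evaluation-tools" not in out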
2 changes: 1 addition & 1 deletion src/aac_metrics/download.py
@@ -234,7 +234,7 @@ def _download_spice(
     try:
         check_spice_install(cache_path)
         return None
-    except (FileNotFoundError, NotADirectoryError):
+    except (FileNotFoundError, NotADirectoryError, PermissionError):
         pass
 
     # Download JAR files for SPICE metric
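With PermissionError added to the caught exceptions, a cache directory that exists but cannot be read (which the commit message attributes to Windows installs) is treated like a missing install and triggers a fresh download instead of aborting. A minimal sketch of invoking the helper shown in this hunk, with the cache path purely illustrative:

from aac_metrics.download import _download_spice

# Hypothetical cache location; _download_spice first checks for an existing
# SPICE install and only downloads the JAR files when that check raises
# FileNotFoundError, NotADirectoryError or, after this commit, PermissionError.
cache_path = "/tmp/aac-metrics-cache"
_download_spice(cache_path, clean_archives=True, verbose=2)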
10 changes: 9 additions & 1 deletion tests/test_compare_cet.py
@@ -4,6 +4,7 @@
 import importlib
 import os.path as osp
 import platform
+import shutil
 import subprocess
 import sys
 import unittest
@@ -62,7 +63,14 @@ def _import_cet_eval_func(
             shell=on_windows,
         )
     else:
+        # Use aac-metrics SPICE installation, but it requires to move some files after
+        _download_spice(str(cet_cache_path), clean_archives=True, verbose=2)
+        shutil.copytree(
+            cet_cache_path.joinpath("aac-metrics", "spice"),
+            cet_cache_path.joinpath("spice"),
+            dirs_exist_ok=True,
+        )
+        shutil.rmtree(cet_cache_path.joinpath("aac-metrics"))
 
     # Append cet_path to allow imports of "caption" in eval_metrics.py.
     sys.path.append(cet_path)
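The added branch installs SPICE through aac-metrics and then relocates it: _download_spice writes under an "aac-metrics" subdirectory of the given cache path, and the test then copies the files to the top-level "spice" directory that the caption-evaluation-tools code expects, with dirs_exist_ok=True merging into any files already present at the destination. A small hypothetical helper, not part of the commit, that checks the layout expected after the move:

from pathlib import Path

def spice_ready_for_cet(cet_cache_path: Path) -> bool:
    # After the relocation above, the SPICE files must live directly under
    # <cet_cache_path>/spice and the intermediate <cet_cache_path>/aac-metrics
    # directory created by _download_spice must be gone.
    return (
        (cet_cache_path / "spice").is_dir()
        and not (cet_cache_path / "aac-metrics").exists()
    )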
@@ -114,7 +122,7 @@ def _test_with_example(self, cands: list[str], mrefs: list[list[str]]) -> None:
         # if platform.system() == "Windows":
         #     return None
 
-        corpus_scores, _ = evaluate(cands, mrefs, metrics="dcase2020")
+        corpus_scores, _ = evaluate(cands, mrefs, metrics="dcase2020", preprocess=True)
 
         self.assertIsInstance(corpus_scores, dict)
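Passing preprocess=True makes the aac-metrics side tokenize the candidates and references before scoring, keeping its inputs comparable to what the caption-evaluation-tools pipeline produces internally. A rough usage sketch, assuming evaluate is imported from the aac_metrics package as in this test module and using toy inputs:

from aac_metrics import evaluate

# Toy example: one candidate caption and its list of reference captions.
cands = ["a man speaks while a dog barks in the background"]
mrefs = [[
    "a man is talking and a dog is barking",
    "someone speaks over the sound of a barking dog",
]]

# metrics="dcase2020" selects the DCASE 2020 metric set; preprocess=True
# tokenizes candidates and references before the metrics are computed.
corpus_scores, sentence_scores = evaluate(cands, mrefs, metrics="dcase2020", preprocess=True)
print(corpus_scores)

Note that this metric set includes the Java-based tokenizer and SPICE, so running the sketch requires the tools installed by the download step shown above.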
