From 59f78c3f48bcaaa2fb79d8d39b52570735e7a451 Mon Sep 17 00:00:00 2001
From: Khaled K Shehada
Date: Wed, 20 Sep 2023 11:36:08 -0400
Subject: [PATCH] Added unit test for scoring with BS_INSTALL_DEPENDENCIES

---
 tests/test_integration.py | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/tests/test_integration.py b/tests/test_integration.py
index 8509affe..6b7bd182 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -1,3 +1,4 @@
+import os
 import pytest
 import subprocess
 import sys
@@ -53,6 +54,37 @@ def test_score(model_identifier, benchmark_identifier, expected_score):
     assert actual_score == expected_score
 
 
+@pytest.mark.parametrize(
+    "model_identifier, benchmark_identifier, expected_score, install_dependencies",
+    [
+        ("randomembedding-100", "Pereira2018.243sentences-linear",
+         approx(0.0285022, abs=0.0005), "newenv"),
+        ("randomembedding-100", "Pereira2018.243sentences-linear",
+         approx(0.0285022, abs=0.0005), "yes"),
+        ("randomembedding-100", "Pereira2018.243sentences-linear",
+         approx(0.0285022, abs=0.0005), "no"),
+    ]
+)
+def test_score_with_install_dependencies(
+        model_identifier, benchmark_identifier, expected_score, install_dependencies):
+    # Remember the prior state of the flag (None means it was unset) so the
+    # override can be undone exactly, even if scoring raises.
+    previous = os.environ.get("BS_INSTALL_DEPENDENCIES")
+    os.environ["BS_INSTALL_DEPENDENCIES"] = install_dependencies
+    try:
+        actual_score = score(
+            model_identifier=model_identifier,
+            benchmark_identifier=benchmark_identifier,
+            conda_active=True)
+    finally:
+        # Restore: delete the key if it was unset before, otherwise put the
+        # original value back, so other tests see an untouched environment.
+        if previous is None:
+            os.environ.pop("BS_INSTALL_DEPENDENCIES", None)
+        else:
+            os.environ["BS_INSTALL_DEPENDENCIES"] = previous
+    assert actual_score == expected_score
+
+
 def test_commandline_score():
     process = subprocess.run(
         [