diff --git a/app/api/api_v1/endpoints/test_run_executions.py b/app/api/api_v1/endpoints/test_run_executions.py
index a67a8e98..4926b68d 100644
--- a/app/api/api_v1/endpoints/test_run_executions.py
+++ b/app/api/api_v1/endpoints/test_run_executions.py
@@ -161,6 +161,11 @@ def start_test_run_execution(
             status_code=HTTPStatus.NOT_FOUND, detail="Test Run Execution not found"
         )
 
+    if len(test_run_execution.project.pics.clusters) == 0:
+        raise HTTPException(
+            status_code=HTTPStatus.UNPROCESSABLE_ENTITY, detail="No PICS were informed."
+        )
+
     test_runner = TestRunner()
 
     try:
diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py
index cba0091f..b4d829e2 100644
--- a/app/tests/api/api_v1/test_test_run_executions.py
+++ b/app/tests/api/api_v1/test_test_run_executions.py
@@ -796,6 +796,25 @@ async def test_test_run_execution_start(async_client: AsyncClient, db: Session)
     assert content["id"] == test_run_execution.id
 
 
+@pytest.mark.asyncio
+async def test_test_run_execution_start_no_pics(
+    async_client: AsyncClient, db: Session
+) -> None:
+    test_run_execution = create_test_run_execution_with_some_test_cases(db=db, pics={})
+
+    # First attempt to start test run
+    response = await async_client.post(
+        f"{settings.API_V1_STR}/test_run_executions/{test_run_execution.id}/start",
+    )
+
+    # Assert 422 UNPROCESSABLE_ENTITY and a detail error message
+    assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
+    content = response.json()
+    assert isinstance(content, dict)
+    assert "detail" in content.keys()
+    assert content["detail"] == "No PICS were informed."
+
+
 @pytest.mark.asyncio
 async def test_test_run_execution_busy(async_client: AsyncClient, db: Session) -> None:
     test_run_execution = create_test_run_execution_with_some_test_cases(db=db)
diff --git a/app/tests/utils/test_run_execution.py b/app/tests/utils/test_run_execution.py
index e0e76123..4cac7a9f 100644
--- a/app/tests/utils/test_run_execution.py
+++ b/app/tests/utils/test_run_execution.py
@@ -23,8 +23,10 @@
 from app.models import TestRunExecution
 from app.models.test_enums import TestStateEnum
 from app.schemas import TestSelection
+from app.schemas.pics import PICS
 from app.schemas.test_run_execution import TestRunExecutionCreate
 from app.tests.utils.project import create_random_project
+from app.tests.utils.test_pics_data import create_random_pics
 
 fake = Faker()
 
@@ -85,12 +87,15 @@ def create_random_test_run_execution_archived(
 
 
 def create_random_test_run_execution(
-    db: Session, selected_tests: Optional[TestSelection] = {}, **kwargs: Any
+    db: Session,
+    selected_tests: Optional[TestSelection] = {},
+    pics: Optional[PICS] = PICS(),
+    **kwargs: Any
 ) -> models.TestRunExecution:
     test_run_execution_dict = random_test_run_execution_dict(**kwargs)
 
     if test_run_execution_dict.get("project_id") is None:
-        project = create_random_project(db, config={})
+        project = create_random_project(db, config={}, pics=pics)
         test_run_execution_dict["project_id"] = project.id
 
     test_run_execution_in = TestRunExecutionCreate(**test_run_execution_dict)
@@ -110,7 +115,7 @@ def create_random_test_run_execution_with_test_case_states(
         "sample_tests": {"SampleTestSuite1": {"TCSS1001": num_test_cases}}
     }
     test_run_execution = create_random_test_run_execution(
-        db=db, selected_tests=selected_tests
+        db=db, selected_tests=selected_tests, pics=create_random_pics()
    )
 
     test_suite_execution = test_run_execution.test_suite_executions[0]
@@ -128,7 +133,7 @@
 
 
 def create_test_run_execution_with_some_test_cases(
-    db: Session, **kwargs: Any
+    db: Session, pics: Optional[PICS] = create_random_pics(), **kwargs: Any
 ) -> TestRunExecution:
     return create_random_test_run_execution(
         db=db,
@@ -137,6 +142,7 @@ def create_test_run_execution_with_some_test_cases(
                 "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3}
             }
         },
+        pics=pics,
         **kwargs
     )
 
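
Note on the behavior introduced above: start_test_run_execution now answers 422 UNPROCESSABLE_ENTITY with the detail "No PICS were informed." whenever the project's PICS has no clusters. The snippet below is a minimal, hypothetical client-side sketch of that contract, written against the same /test_run_executions/{id}/start route exercised in the new test; the helper name and the synchronous httpx call are assumptions for illustration, not part of this change.

from http import HTTPStatus

import httpx


def start_run_or_report_missing_pics(base_url: str, run_id: int) -> bool:
    """Illustrative only: start a test run and surface the new 422 response."""
    # Same route as in test_test_run_execution_start_no_pics above.
    response = httpx.post(f"{base_url}/test_run_executions/{run_id}/start")
    if response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY:
        # The backend now refuses to start a run whose project has no PICS clusters.
        print(f"Cannot start run {run_id}: {response.json()['detail']}")
        return False
    response.raise_for_status()
    return True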