From c29d718b6a2de314754806ecc8e7144e433f6fc8 Mon Sep 17 00:00:00 2001
From: Henrik Stranneheim
Date: Thu, 1 Feb 2024 08:30:50 +0100
Subject: [PATCH] Refactor pipeline to workflow (#2885)

### Changed

- Refactor the Pipeline enum to Workflow
- Remove casting to str

---
 ...1_25_d241d8c493fb_add_pipeline_to_order.py |  4 +-
 ..._de0f5b78dca4_rename_pipeline_sars_cov2.py |  6 +--
 ...ix_sars_cov_2_data_analysis_to_database.py |  6 +--
 ...a5065b4_add_taxprofiler_analysis_option.py |  6 +--
 ...72b_add_raredisease_to_analysis_options.py |  6 +--
 ...998be2e367cf_fix_mip_on_fastq_wgs_cases.py | 12 ++---
 ...d21feaa0_add_pipeline_limitations_table.py |  4 +-
 .../e9df15a35de4_fix_tumour_not_to_maf.py     | 12 ++---
 cg/apps/demultiplex/demultiplex_api.py        |  4 +-
 cg/apps/orderform/json_orderform_parser.py    |  8 ++--
 cg/apps/tb/api.py                             |  6 +--
 cg/cli/add.py                                 |  6 +--
 cg/cli/clean.py                               |  6 +--
 cg/cli/delete/observations.py                 |  4 +-
 cg/cli/generate/report/base.py                |  4 +-
 cg/cli/generate/report/utils.py               | 20 ++++----
 cg/cli/get.py                                 |  2 +-
 cg/cli/set/case.py                            |  8 ++--
 cg/cli/upload/base.py                         | 14 +++---
 cg/cli/upload/clinical_delivery.py            |  6 +--
 cg/cli/upload/observations/observations.py    |  4 +-
 cg/cli/upload/observations/utils.py           |  6 +--
 cg/cli/upload/scout.py                        | 14 +++---
 cg/cli/upload/utils.py                        |  4 +-
 cg/cli/workflow/fastq/base.py                 |  4 +-
 cg/cli/workflow/fastq/fastq_service.py        |  6 +--
 cg/constants/constants.py                     |  4 +-
 cg/constants/delivery.py                      | 38 +++++++--------
 cg/constants/housekeeper_tags.py              | 24 +++++-----
 cg/constants/observations.py                  |  4 +-
 cg/constants/report.py                        | 14 +++---
 cg/meta/clean/api.py                          |  8 ++--
 cg/meta/orders/case_submitter.py              |  6 +--
 cg/meta/orders/fastq_submitter.py             |  6 +--
 cg/meta/orders/metagenome_submitter.py        |  4 +-
 cg/meta/orders/microbial_submitter.py         |  6 +--
 cg/meta/orders/pool_submitter.py              | 13 ++---
 cg/meta/report/balsamic.py                    |  6 +--
 cg/meta/report/mip_dna.py                     |  4 +-
 cg/meta/report/report_api.py                  |  6 +--
 cg/meta/report/rnafusion.py                   |  4 +-
 cg/meta/rsync/rsync_api.py                    |  8 ++--
 cg/meta/upload/gt.py                          |  8 ++--
 cg/meta/upload/nipt/nipt.py                   |  4 +-
 cg/meta/upload/scout/uploadscoutapi.py        | 22 ++++-----
 cg/meta/workflow/analysis.py                  |  4 +-
 cg/meta/workflow/balsamic.py                  |  5 +-
 cg/meta/workflow/balsamic_pon.py              |  4 +-
 cg/meta/workflow/balsamic_qc.py               |  4 +-
 cg/meta/workflow/balsamic_umi.py              |  4 +-
 cg/meta/workflow/fluffy.py                    |  8 ++--
 cg/meta/workflow/microsalt/microsalt.py       |  6 +--
 cg/meta/workflow/mip.py                       |  4 +-
 cg/meta/workflow/mip_dna.py                   |  6 +--
 cg/meta/workflow/mip_rna.py                   |  4 +-
 cg/meta/workflow/mutant.py                    |  4 +-
 cg/meta/workflow/nf_analysis.py               | 30 ++++++------
 cg/meta/workflow/nf_handlers.py               |  2 +-
 cg/meta/workflow/raredisease.py               |  4 +-
 cg/meta/workflow/rnafusion.py                 | 11 ++---
 cg/meta/workflow/taxprofiler.py               | 11 ++---
 cg/models/orders/constants.py                 | 20 ++++----
 cg/models/orders/json_sample.py               |  4 +-
 cg/models/orders/sample_base.py               |  4 +-
 cg/models/orders/samples.py                   |  4 +-
 cg/models/report/validators.py                |  4 +-
 cg/server/admin.py                            |  8 ++--
 cg/server/dto/orders/orders_response.py       |  4 +-
 cg/services/delivery_message/utils.py         |  6 +--
 cg/store/crud/create.py                       | 11 ++---
 cg/store/crud/read.py                         | 26 +++++-----
 cg/store/filters/status_analysis_filters.py   |  8 ++--
 .../status_application_limitations_filters.py |  6 +--
 cg/store/filters/status_case_filters.py       | 14 +++---
 cg/store/models.py                            | 12 ++---
 poetry.lock                                   |  2 +-
 .../orderform/test_excel_orderform_parser.py  |  6 +--
 tests/cli/add/test_cli_add_family.py          |  4 +-
 tests/cli/clean/conftest.py                   | 18 +++---
 tests/cli/clean/test_balsamic_clean.py        |  6 +--
 tests/cli/clean/test_hk_case_bundle_files.py  |  8 ++--
 tests/cli/clean/test_microbial_clean.py       | 10 ++--
 tests/cli/compress/test_cli_compress_fastq.py |  6 +--
 tests/cli/generate/report/conftest.py         |  6 +--
 tests/cli/generate/report/test_utils.py       |  4 +-
 tests/cli/set/test_cli_set_case.py            |  4 +-
 tests/cli/upload/conftest.py                  |  6 +--
 tests/cli/upload/test_cli_scout.py            | 10 ++--
 tests/cli/upload/test_cli_upload_auto.py      |  4 +-
 tests/cli/upload/test_cli_upload_fastq.py     |  4 +-
 tests/cli/upload/test_cli_upload_nipt.py      |  8 ++--
 .../upload/test_cli_upload_observations.py    |  4 +-
 tests/cli/workflow/balsamic/conftest.py       | 30 ++++++------
 tests/cli/workflow/conftest.py                | 10 ++--
 tests/cli/workflow/fastq/test_fastq_base.py   |  4 +-
 tests/cli/workflow/fluffy/conftest.py         |  4 +-
 tests/cli/workflow/mip/conftest.py            |  6 +--
 .../workflow/mip/test_cli_mip_dna_start.py    |  4 +-
 .../rnafusion/test_cli_rnafusion_run.py       |  4 +-
 .../taxprofiler/test_cli_taxprofiler_run.py   |  3 +-
 tests/conftest.py                             | 32 ++++++-------
 tests/meta/archive/conftest.py                |  4 +-
 tests/meta/deliver/test_deliver_ticket.py     |  6 +--
 tests/meta/deliver/test_delivery_api.py       |  8 ++--
 .../test_PoolSubmitter_validate_order.py      |  4 +-
 .../test_SarsCov2Submitter_store_order.py     |  4 +-
 tests/meta/orders/test_meta_orders_api.py     |  4 +-
 tests/meta/orders/test_meta_orders_lims.py    |  8 ++--
 tests/meta/orders/test_meta_orders_status.py  | 36 +++++++-------
 tests/meta/report/conftest.py                 | 10 ++--
 tests/meta/report/test_report_api.py          | 10 ++--
 tests/meta/rsync/conftest.py                  |  6 +--
 tests/meta/upload/conftest.py                 |  8 ++--
 tests/meta/upload/scout/conftest.py           | 30 ++++++------
 .../upload/scout/test_generate_load_config.py |  8 ++--
 .../scout/test_meta_upload_scoutapi_rna.py    |  6 +--
 tests/meta/workflow/conftest.py               |  6 +--
 .../models/balsamic/test_balsamic_analysis.py |  4 +-
 tests/models/mip/test_mip_analysis.py         |  4 +-
 tests/models/report/test_validators.py        |  6 +--
 tests/server/conftest.py                      |  6 +--
 .../server/endpoints/test_orders_endpoint.py  | 12 ++---
 tests/store/api/conftest.py                   |  4 +-
 tests/store/conftest.py                       | 14 +++---
 tests/store/crud/conftest.py                  | 14 +++---
 tests/store/crud/read/test_read.py            | 28 +++++------
 .../crud/read/test_read_analyses_to_clean.py  |  8 ++--
 .../test_read_analyses_to_delivery_report.py  |  8 ++--
 tests/store/crud/read/test_read_analysis.py   | 48 +++++++++----------
 tests/store/crud/read/test_read_case.py      | 22 ++++-----
 .../filters/test_status_analyses_filters.py   | 14 +++---
 ..._status_application_limitations_filters.py |  4 +-
 .../filters/test_status_cases_filters.py      | 28 +++++------
 tests/store/test_delivery.py                  |  4 +-
 tests/store_helpers.py                        | 18 +++---
 tests/utils/test_dispatcher.py                |  8 ++--
 136 files changed, 607 insertions(+), 625 deletions(-)

diff --git a/alembic/versions/2024_01_25_d241d8c493fb_add_pipeline_to_order.py b/alembic/versions/2024_01_25_d241d8c493fb_add_pipeline_to_order.py
index 2762a02959..76bf5cf2ce 100644
--- a/alembic/versions/2024_01_25_d241d8c493fb_add_pipeline_to_order.py
+++ b/alembic/versions/2024_01_25_d241d8c493fb_add_pipeline_to_order.py
@@ -9,7 +9,7 @@
 import sqlalchemy as sa
 
 from alembic import op
-from cg.constants import Pipeline
+from cg.constants import Workflow
 
 # revision identifiers, used by Alembic.
revision = "d241d8c493fb" @@ -21,7 +21,7 @@ def upgrade(): op.add_column( table_name="order", - column=sa.Column("workflow", sa.Enum(*tuple(Pipeline)), nullable=False), + column=sa.Column("workflow", sa.Enum(*tuple(Workflow)), nullable=False), ) diff --git a/alembic/versions/2024_01_25_de0f5b78dca4_rename_pipeline_sars_cov2.py b/alembic/versions/2024_01_25_de0f5b78dca4_rename_pipeline_sars_cov2.py index 59439050c7..6935f1ae58 100644 --- a/alembic/versions/2024_01_25_de0f5b78dca4_rename_pipeline_sars_cov2.py +++ b/alembic/versions/2024_01_25_de0f5b78dca4_rename_pipeline_sars_cov2.py @@ -10,7 +10,7 @@ from sqlalchemy.dialects import mysql from alembic import op -from cg.constants import Pipeline +from cg.constants import Workflow from cg.store.models import Analysis, Case # revision identifiers, used by Alembic. @@ -58,12 +58,12 @@ def upgrade(): for case in session.query(Case).filter(Case.data_analysis == "sars-cov-2"): print(f"Altering case: {str(case)}") - case.data_analysis = str(Pipeline.MUTANT) + case.data_analysis = str(Workflow.MUTANT) print(f"Altered case: {str(case)}") for analysis in session.query(Analysis).filter(Analysis.pipeline == "sars-cov-2"): print(f"Altering analysis: {str(analysis)}") - analysis.pipeline = str(Pipeline.MUTANT) + analysis.pipeline = str(Workflow.MUTANT) print(f"Altered analysis: {str(analysis)}") session.commit() diff --git a/alembic/versions/432379a1adfa_fix_sars_cov_2_data_analysis_to_database.py b/alembic/versions/432379a1adfa_fix_sars_cov_2_data_analysis_to_database.py index 7c3b15cddf..28010199ec 100644 --- a/alembic/versions/432379a1adfa_fix_sars_cov_2_data_analysis_to_database.py +++ b/alembic/versions/432379a1adfa_fix_sars_cov_2_data_analysis_to_database.py @@ -12,7 +12,7 @@ from sqlalchemy.orm import declarative_base from alembic import op -from cg.constants import DataDelivery, Pipeline +from cg.constants import DataDelivery, Workflow Base = declarative_base() @@ -29,7 +29,7 @@ class Case(Base): id = sa.Column(sa.types.Integer, primary_key=True) internal_id = sa.Column(sa.types.String(32), unique=True, nullable=False) name = sa.Column(sa.types.String(128), nullable=False) - data_analysis = Column(types.Enum(*list(Pipeline))) + data_analysis = Column(types.Enum(*list(Workflow))) data_delivery = Column(types.Enum(*list(DataDelivery))) def __str__(self) -> str: @@ -58,7 +58,7 @@ def upgrade(): .filter(Case.data_analysis == "") ): print(f"Altering family: {str(family)}") - family.data_analysis = str(Pipeline.SARS_COV_2) + family.data_analysis = str(Workflow.SARS_COV_2) print(f"Altered family: {str(family)}") session.commit() diff --git a/alembic/versions/9008aa5065b4_add_taxprofiler_analysis_option.py b/alembic/versions/9008aa5065b4_add_taxprofiler_analysis_option.py index 663a240104..e5ffbc80c6 100644 --- a/alembic/versions/9008aa5065b4_add_taxprofiler_analysis_option.py +++ b/alembic/versions/9008aa5065b4_add_taxprofiler_analysis_option.py @@ -11,7 +11,7 @@ from sqlalchemy.orm import declarative_base from alembic import op -from cg.constants import Pipeline +from cg.constants import Workflow # revision identifiers, used by Alembic. 
revision = "9008aa5065b4" @@ -46,13 +46,13 @@ class Analysis(Base): __tablename__ = "analysis" id = sa.Column(sa.types.Integer, primary_key=True) - pipeline = sa.Column(sa.types.Enum(*list(Pipeline))) + pipeline = sa.Column(sa.types.Enum(*list(Workflow))) class Case(Base): __tablename__ = "family" id = sa.Column(sa.types.Integer, primary_key=True) - data_analysis = sa.Column(sa.types.Enum(*list(Pipeline))) + data_analysis = sa.Column(sa.types.Enum(*list(Workflow))) def upgrade(): diff --git a/alembic/versions/9073c61bc72b_add_raredisease_to_analysis_options.py b/alembic/versions/9073c61bc72b_add_raredisease_to_analysis_options.py index e522fadc31..a89749e91e 100644 --- a/alembic/versions/9073c61bc72b_add_raredisease_to_analysis_options.py +++ b/alembic/versions/9073c61bc72b_add_raredisease_to_analysis_options.py @@ -11,7 +11,7 @@ from sqlalchemy.orm import declarative_base from alembic import op -from cg.constants import Pipeline +from cg.constants import Workflow # revision identifiers, used by Alembic. revision = "9073c61bc72b" @@ -47,13 +47,13 @@ class Analysis(Base): __tablename__ = "analysis" id = sa.Column(sa.types.Integer, primary_key=True) - pipeline = sa.Column(sa.types.Enum(*list(Pipeline))) + pipeline = sa.Column(sa.types.Enum(*list(Workflow))) class Case(Base): __tablename__ = "family" id = sa.Column(sa.types.Integer, primary_key=True) - data_analysis = sa.Column(sa.types.Enum(*list(Pipeline))) + data_analysis = sa.Column(sa.types.Enum(*list(Workflow))) def upgrade(): diff --git a/alembic/versions/998be2e367cf_fix_mip_on_fastq_wgs_cases.py b/alembic/versions/998be2e367cf_fix_mip_on_fastq_wgs_cases.py index 50acb4b5b9..2bc9ab979a 100644 --- a/alembic/versions/998be2e367cf_fix_mip_on_fastq_wgs_cases.py +++ b/alembic/versions/998be2e367cf_fix_mip_on_fastq_wgs_cases.py @@ -13,7 +13,7 @@ from sqlalchemy.orm import declarative_base from alembic import op -from cg.constants import PREP_CATEGORIES, DataDelivery, Pipeline +from cg.constants import PREP_CATEGORIES, DataDelivery, Workflow Base = declarative_base() @@ -38,7 +38,7 @@ class Case(Base): name = sa.Column(sa.types.String(128), nullable=False) customer_id = sa.Column(sa.ForeignKey("customer.id", ondelete="CASCADE"), nullable=False) customer = orm.relationship(Customer, foreign_keys=[customer_id]) - data_analysis = sa.Column(sa.types.Enum(*list(Pipeline))) + data_analysis = sa.Column(sa.types.Enum(*list(Workflow))) data_delivery = sa.Column(sa.types.Enum(*list(DataDelivery))) priority = sa.Column(sa.types.Integer, default=1, nullable=False) _panels = sa.Column(sa.types.Text) @@ -111,7 +111,7 @@ def upgrade(): session.query(Case) .filter(Case.customer_id == 1) .filter(Case.data_delivery == DataDelivery.FASTQ) - .filter(Case.data_analysis == Pipeline.FASTQ) + .filter(Case.data_analysis == Workflow.FASTQ) .filter(Case.priority == "research") .filter(Case.ordered_at >= datetime(year=2021, month=2, day=2)) ): @@ -126,7 +126,7 @@ def upgrade(): and sample.name == family.name ): print(f"changing data analysis from FASTQ to MIP for: {family}") - family.data_analysis = Pipeline.MIP_DNA + family.data_analysis = Workflow.MIP_DNA count += 1 session.commit() @@ -143,7 +143,7 @@ def downgrade(): session.query(Case) .filter(Case.customer_id == 1) .filter(Case.data_delivery == DataDelivery.FASTQ) - .filter(Case.data_analysis == Pipeline.MIP_DNA) + .filter(Case.data_analysis == Workflow.MIP_DNA) .filter(Case.priority == "research") .filter(Case.ordered_at >= datetime(year=2021, month=2, day=2)) ): @@ -158,7 +158,7 @@ def downgrade(): and 
sample.name == family.name ): print(f"changing data analysis from MIP to FASTQ for: {family}") - family.data_analysis = Pipeline.FASTQ + family.data_analysis = Workflow.FASTQ count += 1 session.commit() diff --git a/alembic/versions/e853d21feaa0_add_pipeline_limitations_table.py b/alembic/versions/e853d21feaa0_add_pipeline_limitations_table.py index a1e3dceaa6..4f0cb6b7d0 100644 --- a/alembic/versions/e853d21feaa0_add_pipeline_limitations_table.py +++ b/alembic/versions/e853d21feaa0_add_pipeline_limitations_table.py @@ -9,7 +9,7 @@ import sqlalchemy as sa from alembic import op -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow # revision identifiers, used by Alembic. revision = "e853d21feaa0" @@ -30,7 +30,7 @@ def upgrade(): sa.ForeignKey("application.id", ondelete="CASCADE"), nullable=False, ), - sa.Column("pipeline", sa.Enum(*list(Pipeline)), nullable=False), + sa.Column("pipeline", sa.Enum(*list(Workflow)), nullable=False), sa.Column("limitations", sa.Text()), sa.Column("comment", sa.Text()), sa.Column("created_at", sa.DateTime(), server_default=sa.func.now()), diff --git a/alembic/versions/e9df15a35de4_fix_tumour_not_to_maf.py b/alembic/versions/e9df15a35de4_fix_tumour_not_to_maf.py index b34ff8f059..54a5c1659f 100644 --- a/alembic/versions/e9df15a35de4_fix_tumour_not_to_maf.py +++ b/alembic/versions/e9df15a35de4_fix_tumour_not_to_maf.py @@ -13,7 +13,7 @@ from sqlalchemy.orm import declarative_base from alembic import op -from cg.constants import PREP_CATEGORIES, DataDelivery, Pipeline +from cg.constants import PREP_CATEGORIES, DataDelivery, Workflow # revision identifiers, used by Alembic. revision = "e9df15a35de4" @@ -38,7 +38,7 @@ class Case(Base): name = sa.Column(sa.types.String(128), nullable=False) customer_id = sa.Column(sa.ForeignKey("customer.id", ondelete="CASCADE"), nullable=False) customer = orm.relationship(Customer, foreign_keys=[customer_id]) - data_analysis = sa.Column(sa.types.Enum(*list(Pipeline))) + data_analysis = sa.Column(sa.types.Enum(*list(Workflow))) data_delivery = sa.Column(sa.types.Enum(*list(DataDelivery))) priority = sa.Column(sa.types.Integer, default=1, nullable=False) _panels = sa.Column(sa.types.Text) @@ -112,7 +112,7 @@ def upgrade(): session.query(Case) .filter(Case.customer_id == 1) .filter(Case.data_delivery == DataDelivery.FASTQ) - .filter(Case.data_analysis == Pipeline.MIP_DNA) + .filter(Case.data_analysis == Workflow.MIP_DNA) .filter(Case.priority == "research") ): if len(family.links) > 1: @@ -130,7 +130,7 @@ def upgrade(): and sample.name == family.name ): print(f"changing data analysis from MIP to FASTQ for: {family}") - family.data_analysis = Pipeline.FASTQ + family.data_analysis = Workflow.FASTQ count += 1 session.commit() @@ -146,7 +146,7 @@ def downgrade(): session.query(Case) .filter(Case.customer_id == 1) .filter(Case.data_delivery == DataDelivery.FASTQ) - .filter(Case.data_analysis == Pipeline.FASTQ) + .filter(Case.data_analysis == Workflow.FASTQ) .filter(Case.priority == "research") ): if len(family.links) > 1: @@ -164,7 +164,7 @@ def downgrade(): and sample.name == family.name ): print(f"changing data analysis from FASTQ to MIP-DNA for: {family}") - family.data_analysis = Pipeline.MIP_DNA + family.data_analysis = Workflow.MIP_DNA count += 1 session.commit() diff --git a/cg/apps/demultiplex/demultiplex_api.py b/cg/apps/demultiplex/demultiplex_api.py index ee5a2166d1..5759f8ccdb 100644 --- a/cg/apps/demultiplex/demultiplex_api.py +++ b/cg/apps/demultiplex/demultiplex_api.py @@ -10,7 +10,7 @@ 
from cg.apps.housekeeper.hk import HousekeeperAPI from cg.apps.slurm.slurm_api import SlurmAPI from cg.apps.tb import TrailblazerAPI -from cg.constants.constants import FileFormat, Pipeline +from cg.constants.constants import FileFormat, Workflow from cg.constants.demultiplexing import BclConverter, DemultiplexingDirsAndFiles from cg.constants.priority import SlurmQos from cg.constants.tb import AnalysisTypes @@ -207,7 +207,7 @@ def add_to_trailblazer( out_dir=flow_cell.trailblazer_config_path.parent.as_posix(), slurm_quality_of_service=self.slurm_quality_of_service, email=self.mail, - data_analysis=str(Pipeline.DEMULTIPLEX), + data_analysis=Workflow.DEMULTIPLEX, ) def start_demultiplexing(self, flow_cell: FlowCellDirectoryData): diff --git a/cg/apps/orderform/json_orderform_parser.py b/cg/apps/orderform/json_orderform_parser.py index 3f54c2ee44..23004861e7 100644 --- a/cg/apps/orderform/json_orderform_parser.py +++ b/cg/apps/orderform/json_orderform_parser.py @@ -1,5 +1,5 @@ from cg.apps.orderform.orderform_parser import OrderformParser -from cg.constants import DataDelivery, Pipeline +from cg.constants import DataDelivery, Workflow from cg.exc import OrderFormError from cg.models.orders.json_sample import JsonSample from cg.models.orders.order import OrderType @@ -7,9 +7,9 @@ class JsonOrderformParser(OrderformParser): ACCEPTED_DATA_ANALYSES: list[str] = [ - str(Pipeline.MIP_DNA), - str(Pipeline.FLUFFY), - str(Pipeline.BALSAMIC), + Workflow.MIP_DNA, + Workflow.FLUFFY, + Workflow.BALSAMIC, ] NO_VALUE: str = "no_value" samples: list[JsonSample] = [] diff --git a/cg/apps/tb/api.py b/cg/apps/tb/api.py index feba432fe1..f6a32119f5 100644 --- a/cg/apps/tb/api.py +++ b/cg/apps/tb/api.py @@ -6,10 +6,10 @@ from google.auth import jwt from google.auth.crypt import RSASigner -from cg.apps.tb.dto.create_job_request import CreateJobRequest +from cg.apps.tb.dto.create_job_request import CreateJobRequest from cg.apps.tb.models import TrailblazerAnalysis -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import APIMethods, FileFormat, JobType, WorkflowManager from cg.constants.priority import SlurmQos from cg.constants.tb import AnalysisStatus @@ -101,7 +101,7 @@ def add_pending_analysis( out_dir: str, slurm_quality_of_service: SlurmQos, email: str = None, - data_analysis: Pipeline = None, + data_analysis: Workflow = None, ticket: str = None, workflow_manager: str = WorkflowManager.Slurm, ) -> TrailblazerAnalysis: diff --git a/cg/cli/add.py b/cg/cli/add.py index ce31e505cf..65c7abd6e9 100644 --- a/cg/cli/add.py +++ b/cg/cli/add.py @@ -2,7 +2,7 @@ import click -from cg.constants import STATUS_OPTIONS, DataDelivery, Pipeline, Priority +from cg.constants import STATUS_OPTIONS, DataDelivery, Priority, Workflow from cg.constants.archiving import PDC_ARCHIVE_LOCATION from cg.constants.subject import Sex from cg.meta.transfer.external_data import ExternalDataAPI @@ -225,7 +225,7 @@ def add_sample( "data_analysis", help="Analysis workflow", required=True, - type=EnumChoice(Pipeline), + type=EnumChoice(Workflow), ) @click.option( "-dd", @@ -243,7 +243,7 @@ def add_case( context: CGConfig, priority: Priority, panel_abbreviations: tuple[str], - data_analysis: Pipeline, + data_analysis: Workflow, data_delivery: DataDelivery, customer_id: str, name: str, diff --git a/cg/cli/clean.py b/cg/cli/clean.py index 2234e2ec44..cd3935b31a 100644 --- a/cg/cli/clean.py +++ b/cg/cli/clean.py @@ -23,7 +23,7 @@ rnafusion_past_run_dirs, rsync_past_run_dirs, ) -from 
cg.constants.constants import DRY_RUN, SKIP_CONFIRMATION, Pipeline +from cg.constants.constants import DRY_RUN, SKIP_CONFIRMATION, Workflow from cg.constants.housekeeper_tags import AlignmentFileTag, ScoutTag from cg.exc import CleanFlowCellFailedError, FlowCellError from cg.meta.clean.api import CleanAPI @@ -172,7 +172,7 @@ def hk_case_bundle_files(context: CGConfig, days_old: int, dry_run: bool = False @clean.command("hk-bundle-files") @click.option("-c", "--case-id", type=str, required=False) -@click.option("-p", "--pipeline", type=Pipeline, required=False) +@click.option("-p", "--pipeline", type=Workflow, required=False) @click.option("-t", "--tags", multiple=True, required=True) @click.option("-o", "--days-old", type=int, default=30) @DRY_RUN @@ -182,7 +182,7 @@ def hk_bundle_files( case_id: str | None, tags: list, days_old: int | None, - pipeline: Pipeline | None, + pipeline: Workflow | None, dry_run: bool, ): """Remove files found in Housekeeper bundles.""" diff --git a/cg/cli/delete/observations.py b/cg/cli/delete/observations.py index 745ca1947d..a93b0d334c 100644 --- a/cg/cli/delete/observations.py +++ b/cg/cli/delete/observations.py @@ -10,7 +10,7 @@ ARGUMENT_CASE_ID, OPTION_LOQUSDB_SUPPORTED_PIPELINES, ) -from cg.constants.constants import DRY_RUN, SKIP_CONFIRMATION, Pipeline +from cg.constants.constants import DRY_RUN, SKIP_CONFIRMATION, Workflow from cg.exc import CaseNotFoundError, LoqusdbError from cg.meta.observations.balsamic_observations_api import BalsamicObservationsAPI from cg.meta.observations.mip_dna_observations_api import MipDNAObservationsAPI @@ -49,7 +49,7 @@ def delete_observations(context: CGConfig, case_id: str, dry_run: bool, yes: boo @DRY_RUN @click.pass_context def delete_available_observations( - context: click.Context, pipeline: Pipeline | None, dry_run: bool, yes: bool + context: click.Context, pipeline: Workflow | None, dry_run: bool, yes: bool ): """Delete available observation from Loqusdb.""" diff --git a/cg/cli/generate/report/base.py b/cg/cli/generate/report/base.py index e28dd0f681..b09dcfe985 100644 --- a/cg/cli/generate/report/base.py +++ b/cg/cli/generate/report/base.py @@ -21,7 +21,7 @@ get_report_api_pipeline, get_report_case, ) -from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Pipeline +from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Workflow from cg.exc import CgError from cg.meta.report.report_api import ReportAPI from cg.store.models import Case @@ -90,7 +90,7 @@ def generate_delivery_report( @OPTION_DRY_RUN @click.pass_context def generate_available_delivery_reports( - context: click.Context, pipeline: Pipeline, force_report: bool, dry_run: bool + context: click.Context, pipeline: Workflow, force_report: bool, dry_run: bool ) -> None: """Generates delivery reports for all cases that need one and stores them in housekeeper.""" diff --git a/cg/cli/generate/report/utils.py b/cg/cli/generate/report/utils.py index 0bec250622..6272183d17 100644 --- a/cg/cli/generate/report/utils.py +++ b/cg/cli/generate/report/utils.py @@ -8,7 +8,7 @@ from cg.constants import ( REPORT_SUPPORTED_DATA_DELIVERY, REPORT_SUPPORTED_PIPELINES, - Pipeline, + Workflow, ) from cg.meta.report.balsamic import BalsamicReportAPI from cg.meta.report.balsamic_qc import BalsamicQCReportAPI @@ -38,7 +38,7 @@ def get_report_case(context: click.Context, case_id: str) -> Case: # Missing or not valid internal case ID if not case_id or not case: LOG.warning("Invalid case ID. 
Retrieving available cases.") - pipeline: Pipeline = ( + pipeline: Workflow = ( report_api.analysis_api.pipeline if context.obj.meta_apis.get("report_api") else None ) cases_without_delivery_report: list[Case] = ( @@ -77,24 +77,24 @@ def get_report_api(context: click.Context, case: Case) -> ReportAPI: return get_report_api_pipeline(context, case.data_analysis) -def get_report_api_pipeline(context: click.Context, pipeline: Pipeline) -> ReportAPI: +def get_report_api_pipeline(context: click.Context, pipeline: Workflow) -> ReportAPI: """Resolves the report API given a specific pipeline.""" # Default report API pipeline: MIP-DNA - pipeline: Pipeline = pipeline if pipeline else Pipeline.MIP_DNA - dispatch_report_api: dict[Pipeline, ReportAPI] = { - Pipeline.BALSAMIC: BalsamicReportAPI( + pipeline: Workflow = pipeline if pipeline else Workflow.MIP_DNA + dispatch_report_api: dict[Workflow, ReportAPI] = { + Workflow.BALSAMIC: BalsamicReportAPI( config=context.obj, analysis_api=BalsamicAnalysisAPI(config=context.obj) ), - Pipeline.BALSAMIC_UMI: BalsamicUmiReportAPI( + Workflow.BALSAMIC_UMI: BalsamicUmiReportAPI( config=context.obj, analysis_api=BalsamicUmiAnalysisAPI(config=context.obj) ), - Pipeline.BALSAMIC_QC: BalsamicQCReportAPI( + Workflow.BALSAMIC_QC: BalsamicQCReportAPI( config=context.obj, analysis_api=BalsamicQCAnalysisAPI(config=context.obj) ), - Pipeline.MIP_DNA: MipDNAReportAPI( + Workflow.MIP_DNA: MipDNAReportAPI( config=context.obj, analysis_api=MipDNAAnalysisAPI(config=context.obj) ), - Pipeline.RNAFUSION: RnafusionReportAPI( + Workflow.RNAFUSION: RnafusionReportAPI( config=context.obj, analysis_api=RnafusionAnalysisAPI(config=context.obj) ), } diff --git a/cg/cli/get.py b/cg/cli/get.py index f805d2236c..7a28f304b9 100644 --- a/cg/cli/get.py +++ b/cg/cli/get.py @@ -10,7 +10,7 @@ from cg.store.store import Store LOG = logging.getLogger(__name__) -ANALYSIS_HEADERS = ["Analysis Date", "Pipeline", "Version"] +ANALYSIS_HEADERS = ["Analysis Date", "Workflow", "Version"] FAMILY_HEADERS = ["Case", "Name", "Customer", "Priority", "Panels", "Action"] FLOW_CELL_HEADERS = ["Flowcell", "Type", "Sequencer", "Date", "Archived?", "Status"] LINK_HEADERS = ["Sample", "Mother", "Father"] diff --git a/cg/cli/set/case.py b/cg/cli/set/case.py index 620a2fa064..a64337c3d6 100644 --- a/cg/cli/set/case.py +++ b/cg/cli/set/case.py @@ -4,7 +4,7 @@ import click -from cg.constants import DataDelivery, Pipeline, Priority +from cg.constants import DataDelivery, Priority, Workflow from cg.constants.constants import CaseActions from cg.models.cg_config import CGConfig from cg.store.models import Case, Customer, Panel @@ -21,7 +21,7 @@ "-d", "--data-analysis", "data_analysis", - type=EnumChoice(Pipeline), + type=EnumChoice(Workflow), help="Update case data analysis", ) @click.option( @@ -40,7 +40,7 @@ def set_case( context: CGConfig, action: str | None, - data_analysis: Pipeline | None, + data_analysis: Workflow | None, data_delivery: DataDelivery | None, priority: Priority | None, panel_abbreviations: tuple[str] | None, @@ -116,7 +116,7 @@ def update_customer(case: Case, customer_id: str, status_db: Store) -> None: case.customer = customer_obj -def update_data_analysis(case: Case, data_analysis: Pipeline) -> None: +def update_data_analysis(case: Case, data_analysis: Workflow) -> None: LOG.info(f"Update data_analysis: {case.data_analysis or 'NA'} -> {data_analysis}") case.data_analysis = data_analysis diff --git a/cg/cli/upload/base.py b/cg/cli/upload/base.py index 33fb6d49d9..69c7022bfa 100644 --- 
a/cg/cli/upload/base.py +++ b/cg/cli/upload/base.py @@ -30,7 +30,7 @@ ) from cg.cli.upload.utils import suggest_cases_to_upload from cg.cli.upload.validate import validate -from cg.constants import Pipeline +from cg.constants import Workflow from cg.exc import AnalysisAlreadyUploadedError from cg.meta.upload.balsamic.balsamic import BalsamicUploadAPI from cg.meta.upload.microsalt.microsalt_upload_api import MicrosaltUploadAPI @@ -74,13 +74,13 @@ def upload(context: click.Context, case_id: str | None, restart: bool): # Analysis being uploaded or it has been already uploaded return - if Pipeline.BALSAMIC in case.data_analysis: + if Workflow.BALSAMIC in case.data_analysis: upload_api = BalsamicUploadAPI(config_object) - elif case.data_analysis == Pipeline.RNAFUSION: + elif case.data_analysis == Workflow.RNAFUSION: upload_api = RnafusionUploadAPI(config_object) - elif case.data_analysis == Pipeline.MIP_RNA: + elif case.data_analysis == Workflow.MIP_RNA: upload_api = MipRNAUploadAPI(config_object) - elif case.data_analysis == Pipeline.MICROSALT: + elif case.data_analysis == Workflow.MICROSALT: upload_api = MicrosaltUploadAPI(config_object) context.obj.meta_apis["upload_api"] = upload_api @@ -92,9 +92,9 @@ def upload(context: click.Context, case_id: str | None, restart: bool): @upload.command("auto") -@click.option("--pipeline", type=EnumChoice(Pipeline), help="Limit to specific pipeline") +@click.option("--pipeline", type=EnumChoice(Workflow), help="Limit to specific pipeline") @click.pass_context -def upload_all_completed_analyses(context: click.Context, pipeline: Pipeline = None): +def upload_all_completed_analyses(context: click.Context, pipeline: Workflow = None): """Upload all completed analyses""" LOG.info("----------------- AUTO -----------------") diff --git a/cg/cli/upload/clinical_delivery.py b/cg/cli/upload/clinical_delivery.py index 001fdbff6d..2e9c108be0 100644 --- a/cg/cli/upload/clinical_delivery.py +++ b/cg/cli/upload/clinical_delivery.py @@ -8,7 +8,7 @@ from cg.apps.tb import TrailblazerAPI from cg.apps.tb.models import TrailblazerAnalysis -from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Pipeline, Priority +from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Priority, Workflow from cg.constants.constants import DRY_RUN from cg.constants.delivery import PIPELINE_ANALYSIS_TAG_MAP from cg.constants.tb import AnalysisTypes @@ -73,7 +73,7 @@ def upload_clinical_delivery(context: click.Context, case_id: str, dry_run: bool config_path=rsync_api.trailblazer_config_path.as_posix(), out_dir=rsync_api.log_dir.as_posix(), slurm_quality_of_service=Priority.priority_to_slurm_qos().get(case.priority), - data_analysis=Pipeline.RSYNC, + data_analysis=Workflow.RSYNC, ticket=case.latest_ticket, ) trailblazer_api.add_upload_job_to_analysis(analysis_id=analysis.id, slurm_id=job_id) @@ -89,7 +89,7 @@ def auto_fastq(context: click.Context, dry_run: bool): exit_code: int = EXIT_SUCCESS status_db: Store = context.obj.status_db trailblazer_api: TrailblazerAPI = context.obj.trailblazer_api - for analysis_obj in status_db.get_analyses_to_upload(pipeline=Pipeline.FASTQ): + for analysis_obj in status_db.get_analyses_to_upload(pipeline=Workflow.FASTQ): if analysis_obj.case.analyses[0].uploaded_at: LOG.debug( f"Newer analysis already uploaded for {analysis_obj.case.internal_id}, skipping" diff --git a/cg/cli/upload/observations/observations.py b/cg/cli/upload/observations/observations.py index 56d35571e9..460de4f7ff 100644 --- a/cg/cli/upload/observations/observations.py +++ 
b/cg/cli/upload/observations/observations.py @@ -17,7 +17,7 @@ OPTION_DRY, OPTION_LOQUSDB_SUPPORTED_PIPELINES, ) -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.exc import CaseNotFoundError, LoqusdbError from cg.meta.observations.balsamic_observations_api import BalsamicObservationsAPI from cg.meta.observations.mip_dna_observations_api import MipDNAObservationsAPI @@ -55,7 +55,7 @@ def upload_observations_to_loqusdb(context: CGConfig, case_id: str | None, dry_r @OPTION_DRY @click.pass_context def upload_available_observations_to_loqusdb( - context: click.Context, pipeline: Pipeline | None, dry_run: bool + context: click.Context, pipeline: Workflow | None, dry_run: bool ): """Uploads the available observations to Loqusdb.""" diff --git a/cg/cli/upload/observations/utils.py b/cg/cli/upload/observations/utils.py index 9877c137f3..0540d1ef29 100644 --- a/cg/cli/upload/observations/utils.py +++ b/cg/cli/upload/observations/utils.py @@ -4,7 +4,7 @@ from sqlalchemy.orm import Query -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.constants.observations import LOQUSDB_SUPPORTED_PIPELINES from cg.constants.sequencing import SequencingMethod from cg.exc import CaseNotFoundError, LoqusdbUploadCaseError @@ -54,8 +54,8 @@ def get_observations_api( ) -> MipDNAObservationsAPI | BalsamicObservationsAPI: """Return an observations API given a specific case object.""" observations_apis = { - Pipeline.MIP_DNA: MipDNAObservationsAPI(context, get_sequencing_method(case)), - Pipeline.BALSAMIC: BalsamicObservationsAPI(context, get_sequencing_method(case)), + Workflow.MIP_DNA: MipDNAObservationsAPI(context, get_sequencing_method(case)), + Workflow.BALSAMIC: BalsamicObservationsAPI(context, get_sequencing_method(case)), } return observations_apis[case.data_analysis] diff --git a/cg/cli/upload/scout.py b/cg/cli/upload/scout.py index 07d840d2a2..05d49f5dcc 100644 --- a/cg/cli/upload/scout.py +++ b/cg/cli/upload/scout.py @@ -9,7 +9,7 @@ from cg.apps.housekeeper.hk import HousekeeperAPI from cg.apps.scout.scoutapi import ScoutAPI from cg.cli.upload.utils import suggest_cases_to_upload -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import FileFormat from cg.constants.scout import ScoutCustomCaseReportTags from cg.io.controller import WriteStream @@ -237,12 +237,12 @@ def upload_multiqc_to_scout(context: CGConfig, case_id: str, dry_run: bool) -> N def get_upload_api(case: Case, cg_config: CGConfig) -> UploadAPI: """Return the upload API based on the data analysis type""" - analysis_apis: dict[Pipeline, UploadAPI] = { - Pipeline.BALSAMIC: BalsamicAnalysisAPI, - Pipeline.BALSAMIC_UMI: BalsamicUmiAnalysisAPI, - Pipeline.MIP_RNA: MipRNAAnalysisAPI, - Pipeline.MIP_DNA: MipDNAAnalysisAPI, - Pipeline.RNAFUSION: RnafusionAnalysisAPI, + analysis_apis: dict[Workflow, UploadAPI] = { + Workflow.BALSAMIC: BalsamicAnalysisAPI, + Workflow.BALSAMIC_UMI: BalsamicUmiAnalysisAPI, + Workflow.MIP_RNA: MipRNAAnalysisAPI, + Workflow.MIP_DNA: MipDNAAnalysisAPI, + Workflow.RNAFUSION: RnafusionAnalysisAPI, } return UploadAPI( diff --git a/cg/cli/upload/utils.py b/cg/cli/upload/utils.py index b0d0595acb..3af9502751 100644 --- a/cg/cli/upload/utils.py +++ b/cg/cli/upload/utils.py @@ -4,7 +4,7 @@ import click -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import MAX_ITEMS_TO_RETRIEVE from cg.store.models import Analysis from cg.store.store import Store 
@@ -12,7 +12,7 @@ LOG = logging.getLogger(__name__) -def suggest_cases_to_upload(status_db: Store, pipeline: Pipeline | None = None) -> None: +def suggest_cases_to_upload(status_db: Store, pipeline: Workflow | None = None) -> None: """Print a list of suggested cases to upload.""" LOG.warning("Provide a case, suggestions:") records: list[Analysis] = status_db.get_analyses_to_upload(pipeline=pipeline)[ diff --git a/cg/cli/workflow/fastq/base.py b/cg/cli/workflow/fastq/base.py index 37eaa20d25..45972d5762 100644 --- a/cg/cli/workflow/fastq/base.py +++ b/cg/cli/workflow/fastq/base.py @@ -4,7 +4,7 @@ from cg.cli.workflow.commands import ARGUMENT_CASE_ID from cg.cli.workflow.fastq.fastq_service import FastqService -from cg.constants.constants import DRY_RUN, Pipeline +from cg.constants.constants import DRY_RUN, Workflow from cg.meta.workflow.analysis import AnalysisAPI from cg.store.store import Store @@ -41,5 +41,5 @@ def store_fastq_analysis(context: click.Context, case_id: str, dry_run: bool = F def store_available_fastq_analysis(context: click.Context, dry_run: bool = False): """Creates an analysis object in status-db for all fastq cases to be delivered""" status_db: Store = context.obj.status_db - for case in status_db.cases_to_analyze(pipeline=Pipeline.FASTQ, threshold=False): + for case in status_db.cases_to_analyze(pipeline=Workflow.FASTQ, threshold=False): context.invoke(store_fastq_analysis, case_id=case.internal_id, dry_run=dry_run) diff --git a/cg/cli/workflow/fastq/fastq_service.py b/cg/cli/workflow/fastq/fastq_service.py index 9b3757f5eb..c2075ab497 100644 --- a/cg/cli/workflow/fastq/fastq_service.py +++ b/cg/cli/workflow/fastq/fastq_service.py @@ -1,7 +1,7 @@ import datetime as dt from cg.apps.tb.api import TrailblazerAPI -from cg.constants.constants import AnalysisType, Pipeline +from cg.constants.constants import AnalysisType, Workflow from cg.constants.tb import AnalysisStatus from cg.exc import CaseNotFoundError from cg.store.models import Analysis, Case @@ -25,7 +25,7 @@ def _get_case(self, case_id: str) -> Case: def _add_analysis_to_store(self, case: Case) -> None: new_analysis: Analysis = self.store.add_analysis( - pipeline=Pipeline.FASTQ, + pipeline=Workflow.FASTQ, completed_at=dt.datetime.now(), primary=True, started_at=dt.datetime.now(), @@ -38,7 +38,7 @@ def _add_analysis_to_trailblazer(self, case: Case) -> None: self.trailblazer_api.add_pending_analysis( case_id=case.internal_id, analysis_type=AnalysisType.OTHER, - data_analysis=Pipeline.FASTQ, + data_analysis=Workflow.FASTQ, config_path="", out_dir="", slurm_quality_of_service=case.slurm_priority, diff --git a/cg/constants/constants.py b/cg/constants/constants.py index ccb52406cc..99ed2da688 100644 --- a/cg/constants/constants.py +++ b/cg/constants/constants.py @@ -106,7 +106,7 @@ class PrepCategory(StrEnum): STATUS_OPTIONS = ("affected", "unaffected", "unknown") -class Pipeline(StrEnum): +class Workflow(StrEnum): BALSAMIC: str = "balsamic" BALSAMIC_PON: str = "balsamic-pon" BALSAMIC_QC: str = "balsamic-qc" @@ -253,7 +253,7 @@ class Strandedness(StrEnum): UNSTRANDED: str = "unstranded" -PIPELINES_USING_PARTIAL_ANALYSES: list[Pipeline] = [Pipeline.MICROSALT, Pipeline.MUTANT] +PIPELINES_USING_PARTIAL_ANALYSES: list[Workflow] = [Workflow.MICROSALT, Workflow.MUTANT] class MultiQC(StrEnum): diff --git a/cg/constants/delivery.py b/cg/constants/delivery.py index 17c1e2d2ee..da5c5bb88c 100644 --- a/cg/constants/delivery.py +++ b/cg/constants/delivery.py @@ -1,22 +1,22 @@ """Constants for delivery.""" -from 
cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.constants.housekeeper_tags import ( HK_DELIVERY_REPORT_TAG, AlignmentFileTag, AnalysisTag, ) -ONLY_ONE_CASE_PER_TICKET: list[Pipeline] = [ - Pipeline.FASTQ, - Pipeline.MICROSALT, - Pipeline.MUTANT, +ONLY_ONE_CASE_PER_TICKET: list[Workflow] = [ + Workflow.FASTQ, + Workflow.MICROSALT, + Workflow.MUTANT, ] -SKIP_MISSING: list[Pipeline] = [ - Pipeline.FASTQ, - Pipeline.MICROSALT, - Pipeline.MUTANT, +SKIP_MISSING: list[Workflow] = [ + Workflow.FASTQ, + Workflow.MICROSALT, + Workflow.MUTANT, ] BALSAMIC_ANALYSIS_CASE_TAGS: list[set[str]] = [ @@ -168,40 +168,40 @@ ] -PIPELINE_ANALYSIS_TAG_MAP: dict[Pipeline, dict] = { - Pipeline.BALSAMIC: { +PIPELINE_ANALYSIS_TAG_MAP: dict[Workflow, dict] = { + Workflow.BALSAMIC: { "case_tags": BALSAMIC_ANALYSIS_CASE_TAGS, "sample_tags": BALSAMIC_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.BALSAMIC_QC: { + Workflow.BALSAMIC_QC: { "case_tags": BALSAMIC_QC_ANALYSIS_CASE_TAGS, "sample_tags": BALSAMIC_QC_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.BALSAMIC_UMI: { + Workflow.BALSAMIC_UMI: { "case_tags": BALSAMIC_UMI_ANALYSIS_CASE_TAGS, "sample_tags": BALSAMIC_UMI_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.MIP_DNA: { + Workflow.MIP_DNA: { "case_tags": MIP_DNA_ANALYSIS_CASE_TAGS, "sample_tags": MIP_DNA_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.MIP_RNA: { + Workflow.MIP_RNA: { "case_tags": MIP_RNA_ANALYSIS_CASE_TAGS, "sample_tags": MIP_RNA_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.MICROSALT: { + Workflow.MICROSALT: { "case_tags": MICROSALT_ANALYSIS_CASE_TAGS, "sample_tags": MICROSALT_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.FASTQ: { + Workflow.FASTQ: { "case_tags": FASTQ_ANALYSIS_CASE_TAGS, "sample_tags": FASTQ_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.MUTANT: { + Workflow.MUTANT: { "case_tags": SARSCOV2_ANALYSIS_CASE_TAGS, "sample_tags": SARSCOV2_ANALYSIS_SAMPLE_TAGS, }, - Pipeline.RNAFUSION: { + Workflow.RNAFUSION: { "case_tags": RNAFUSION_ANALYSIS_CASE_TAGS, "sample_tags": RNAFUSION_ANALYSIS_SAMPLE_TAGS, }, diff --git a/cg/constants/housekeeper_tags.py b/cg/constants/housekeeper_tags.py index 0190f4bf9b..36d43e2578 100644 --- a/cg/constants/housekeeper_tags.py +++ b/cg/constants/housekeeper_tags.py @@ -2,7 +2,7 @@ from enum import StrEnum -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow class AlignmentFileTag(StrEnum): @@ -114,13 +114,13 @@ class BalsamicProtectedTags: WORKFLOW_PROTECTED_TAGS = { - str(Pipeline.BALSAMIC): BalsamicProtectedTags.QC + BalsamicProtectedTags.VARIANT_CALLERS, - str(Pipeline.BALSAMIC_QC): BalsamicProtectedTags.QC, - str(Pipeline.BALSAMIC_PON): [], - str(Pipeline.BALSAMIC_UMI): BalsamicProtectedTags.QC + BalsamicProtectedTags.VARIANT_CALLERS, - str(Pipeline.FASTQ): [], - str(Pipeline.FLUFFY): ["NIPT_csv", "MultiQC"], - str(Pipeline.MICROSALT): [ + Workflow.BALSAMIC: BalsamicProtectedTags.QC + BalsamicProtectedTags.VARIANT_CALLERS, + Workflow.BALSAMIC_QC: BalsamicProtectedTags.QC, + Workflow.BALSAMIC_PON: [], + Workflow.BALSAMIC_UMI: BalsamicProtectedTags.QC + BalsamicProtectedTags.VARIANT_CALLERS, + Workflow.FASTQ: [], + Workflow.FLUFFY: ["NIPT_csv", "MultiQC"], + Workflow.MICROSALT: [ ["microsalt-log"], ["config"], ["qc-report", "visualization"], @@ -129,7 +129,7 @@ class BalsamicProtectedTags: ["microsalt-config"], ["assembly"], ], - str(Pipeline.MIP_DNA): [ + Workflow.MIP_DNA: [ ["vcf-snv-clinical"], ["vcf-clinical"], # legacy ["vcf-snv-research"], @@ -165,7 +165,7 @@ class BalsamicProtectedTags: ["multiqc-html"], ["storage"], ], - str(Pipeline.MIP_RNA): 
[ + Workflow.MIP_RNA: [ ["vcf-snv-clinical"], ["vcf-snv-research"], ["mip-config"], @@ -177,7 +177,7 @@ class BalsamicProtectedTags: ["fusion", "vcf"], ["salmon-quant"], ], - str(Pipeline.MUTANT): [ + Workflow.MUTANT: [ ["fohm-delivery", "instrument-properties"], ["fohm-delivery", "pangolin-typing-fohm", "csv"], ["vcf", "vcf-report", "fohm-delivery"], @@ -194,7 +194,7 @@ class BalsamicProtectedTags: ["gisaid-log"], ["gisaid-csv"], ], - str(Pipeline.RNAFUSION): [ + Workflow.RNAFUSION: [ [AnalysisTag.FUSION, AnalysisTag.ARRIBA], [AnalysisTag.FUSION, AnalysisTag.STARFUSION], [AnalysisTag.FUSION, AnalysisTag.FUSIONCATCHER], diff --git a/cg/constants/observations.py b/cg/constants/observations.py index 3f51019566..5e0d97832e 100644 --- a/cg/constants/observations.py +++ b/cg/constants/observations.py @@ -2,11 +2,11 @@ from enum import Enum, StrEnum -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.constants.sequencing import SequencingMethod LOQUSDB_ID = "_id" -LOQUSDB_SUPPORTED_PIPELINES = [Pipeline.MIP_DNA, Pipeline.BALSAMIC] +LOQUSDB_SUPPORTED_PIPELINES = [Workflow.MIP_DNA, Workflow.BALSAMIC] LOQUSDB_MIP_SEQUENCING_METHODS = [SequencingMethod.WGS, SequencingMethod.WES] LOQUSDB_BALSAMIC_SEQUENCING_METHODS = [SequencingMethod.WGS] diff --git a/cg/constants/report.py b/cg/constants/report.py index fd38a2b002..60f3228dc2 100644 --- a/cg/constants/report.py +++ b/cg/constants/report.py @@ -1,16 +1,16 @@ """Delivery report constants.""" from cg.constants import DataDelivery -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow BALSAMIC_REPORT_ACCREDITED_PANELS: list[str] = ["gmsmyeloid"] -REPORT_SUPPORTED_PIPELINES: tuple[Pipeline, ...] = ( - Pipeline.BALSAMIC, - Pipeline.BALSAMIC_UMI, - Pipeline.BALSAMIC_QC, - Pipeline.MIP_DNA, - Pipeline.RNAFUSION, +REPORT_SUPPORTED_PIPELINES: tuple[Workflow, ...] = ( + Workflow.BALSAMIC, + Workflow.BALSAMIC_UMI, + Workflow.BALSAMIC_QC, + Workflow.MIP_DNA, + Workflow.RNAFUSION, ) REPORT_SUPPORTED_DATA_DELIVERY: tuple[DataDelivery, ...] 
= ( diff --git a/cg/meta/clean/api.py b/cg/meta/clean/api.py index 603db91458..a240482d40 100644 --- a/cg/meta/clean/api.py +++ b/cg/meta/clean/api.py @@ -6,7 +6,7 @@ from housekeeper.store.models import File, Version from cg.apps.housekeeper.hk import HousekeeperAPI -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.constants.housekeeper_tags import WORKFLOW_PROTECTED_TAGS from cg.store.models import Analysis from cg.store.store import Store @@ -19,7 +19,7 @@ def __init__(self, status_db: Store, housekeeper_api: HousekeeperAPI): self.status_db = status_db self.housekeeper_api = housekeeper_api - def get_bundle_files(self, before: datetime, pipeline: Pipeline) -> Iterator[list[File]]: + def get_bundle_files(self, before: datetime, pipeline: Workflow) -> Iterator[list[File]]: """Get any bundle files for a specific version""" analysis: Analysis @@ -77,8 +77,8 @@ def has_protected_tags(file: File, protected_tags_lists: list[list[str]]) -> boo def get_unprotected_existing_bundle_files(self, before: datetime) -> Iterator[File]: """Returns all existing bundle files from analyses started before 'before' that have no protected tags""" - pipeline: Pipeline - for pipeline in Pipeline: + pipeline: Workflow + for pipeline in Workflow: protected_tags_lists = WORKFLOW_PROTECTED_TAGS.get(pipeline) if not protected_tags_lists: LOG.debug(f"No protected tags defined for {pipeline}, skipping") diff --git a/cg/meta/orders/case_submitter.py b/cg/meta/orders/case_submitter.py index 8f9e4e2f1a..7b2b438e5e 100644 --- a/cg/meta/orders/case_submitter.py +++ b/cg/meta/orders/case_submitter.py @@ -2,7 +2,7 @@ import logging from cg.constants import DataDelivery, Priority -from cg.constants.constants import CaseActions, Pipeline +from cg.constants.constants import CaseActions, Workflow from cg.constants.pedigree import Pedigree from cg.exc import OrderError from cg.meta.orders.lims import process_lims @@ -171,7 +171,7 @@ def order_to_status(order: OrderIn) -> dict: ) panels: set[str] = set() - if data_analysis == Pipeline.MIP_DNA: + if data_analysis == Workflow.MIP_DNA: panels: set[str] = { panel for sample in case_samples for panel in sample.panels if panel } @@ -353,7 +353,7 @@ def _create_sample(self, case, customer_obj, order, ordered, sample, ticket): def _create_case(self, case: dict, customer_obj: Customer, ticket: str): case_obj = self.status.add_case( cohorts=case["cohorts"], - data_analysis=Pipeline(case["data_analysis"]), + data_analysis=Workflow(case["data_analysis"]), data_delivery=DataDelivery(case["data_delivery"]), name=case["name"], priority=case["priority"], diff --git a/cg/meta/orders/fastq_submitter.py b/cg/meta/orders/fastq_submitter.py index 8bfe3ed647..df085605b9 100644 --- a/cg/meta/orders/fastq_submitter.py +++ b/cg/meta/orders/fastq_submitter.py @@ -1,7 +1,7 @@ import datetime as dt from cg.constants import DataDelivery, GenePanelMasterList -from cg.constants.constants import CustomerId, Pipeline, PrepCategory +from cg.constants.constants import CustomerId, PrepCategory, Workflow from cg.constants.priority import Priority from cg.exc import OrderError from cg.meta.orders.lims import process_lims @@ -57,7 +57,7 @@ def order_to_status(order: OrderIn) -> dict: def create_maf_case(self, sample_obj: Sample) -> None: """Add a MAF case to the Status database.""" case: Case = self.status.add_case( - data_analysis=Pipeline(Pipeline.MIP_DNA), + data_analysis=Workflow(Workflow.MIP_DNA), data_delivery=DataDelivery(DataDelivery.NO_DELIVERY), 
name="_".join([sample_obj.name, "MAF"]), panels=[GenePanelMasterList.OMIM_AUTO], @@ -111,7 +111,7 @@ def store_items_in_status( new_samples.append(new_sample) if not case: case = self.status.add_case( - data_analysis=Pipeline(submitted_case["data_analysis"]), + data_analysis=Workflow(submitted_case["data_analysis"]), data_delivery=DataDelivery(submitted_case["data_delivery"]), name=ticket_id, panels=None, diff --git a/cg/meta/orders/metagenome_submitter.py b/cg/meta/orders/metagenome_submitter.py index d9d7aed07a..0d918b6fae 100644 --- a/cg/meta/orders/metagenome_submitter.py +++ b/cg/meta/orders/metagenome_submitter.py @@ -1,7 +1,7 @@ import datetime as dt from cg.constants import DataDelivery -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.constants.subject import Sex from cg.exc import OrderError from cg.meta.orders.lims import process_lims @@ -118,7 +118,7 @@ def store_items_in_status( if not case: case = self.status.add_case( - data_analysis=Pipeline(case_dict["data_analysis"]), + data_analysis=Workflow(case_dict["data_analysis"]), data_delivery=DataDelivery(case_dict["data_delivery"]), name=str(ticket_id), panels=None, diff --git a/cg/meta/orders/microbial_submitter.py b/cg/meta/orders/microbial_submitter.py index 4781f556b7..eb078e2899 100644 --- a/cg/meta/orders/microbial_submitter.py +++ b/cg/meta/orders/microbial_submitter.py @@ -1,7 +1,7 @@ import datetime as dt from cg.constants import DataDelivery -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.constants.subject import Sex from cg.meta.orders.lims import process_lims from cg.meta.orders.submitter import Submitter @@ -64,7 +64,7 @@ def submit_order(self, order: OrderIn) -> dict: ticket_id=order.ticket, items=status_data["samples"], comment=status_data["comment"], - data_analysis=Pipeline(status_data["data_analysis"]), + data_analysis=Workflow(status_data["data_analysis"]), data_delivery=DataDelivery(status_data["data_delivery"]), ) return {"project": project_data, "records": samples} @@ -73,7 +73,7 @@ def store_items_in_status( self, comment: str, customer_id: str, - data_analysis: Pipeline, + data_analysis: Workflow, data_delivery: DataDelivery, order: str, ordered: dt.datetime, diff --git a/cg/meta/orders/pool_submitter.py b/cg/meta/orders/pool_submitter.py index e0963ecea8..279872832d 100644 --- a/cg/meta/orders/pool_submitter.py +++ b/cg/meta/orders/pool_submitter.py @@ -1,21 +1,14 @@ import datetime as dt from cg.constants import DataDelivery -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.exc import OrderError from cg.meta.orders.lims import process_lims from cg.meta.orders.submitter import Submitter from cg.models.orders.order import OrderIn from cg.models.orders.sample_base import SexEnum from cg.models.orders.samples import RmlSample -from cg.store.models import ( - ApplicationVersion, - Customer, - Case, - CaseSample, - Pool, - Sample, -) +from cg.store.models import ApplicationVersion, Case, CaseSample, Customer, Pool, Sample class PoolSubmitter(Submitter): @@ -130,7 +123,7 @@ def store_items_in_status( customer=customer, case_name=case_name ) if not case: - data_analysis: Pipeline = Pipeline(pool["data_analysis"]) + data_analysis: Workflow = Workflow(pool["data_analysis"]) data_delivery: DataDelivery = DataDelivery(pool["data_delivery"]) case = self.status.add_case( data_analysis=data_analysis, diff --git a/cg/meta/report/balsamic.py b/cg/meta/report/balsamic.py index 
b2fff9b425..bdd789ce17 100644 --- a/cg/meta/report/balsamic.py +++ b/cg/meta/report/balsamic.py @@ -15,7 +15,7 @@ REQUIRED_SAMPLE_METADATA_BALSAMIC_TO_WGS_FIELDS, REQUIRED_SAMPLE_METHODS_FIELDS, REQUIRED_SAMPLE_TIMESTAMP_FIELDS, - Pipeline, + Workflow, ) from cg.constants.scout import BALSAMIC_CASE_TAGS from cg.meta.report.field_validators import get_million_read_pairs @@ -167,7 +167,7 @@ def get_required_fields(self, case: CaseModel) -> dict: analysis_type: str = case.data_analysis.type required_data_analysis_fields: list[str] = ( REQUIRED_DATA_ANALYSIS_FIELDS - if self.analysis_api.pipeline == Pipeline.BALSAMIC_QC + if self.analysis_api.pipeline == Workflow.BALSAMIC_QC else REQUIRED_DATA_ANALYSIS_BALSAMIC_FIELDS ) required_sample_metadata_fields: list[str] = [] @@ -210,7 +210,7 @@ def get_required_fields(self, case: CaseModel) -> dict: def get_template_name(self) -> str: """Return template name to render the delivery report.""" - return Pipeline.BALSAMIC + "_report.html" + return Workflow.BALSAMIC + "_report.html" def get_upload_case_tags(self) -> dict: """Return Balsamic upload case tags.""" diff --git a/cg/meta/report/mip_dna.py b/cg/meta/report/mip_dna.py index 215daf9b48..8d0b72b112 100644 --- a/cg/meta/report/mip_dna.py +++ b/cg/meta/report/mip_dna.py @@ -14,7 +14,7 @@ REQUIRED_SAMPLE_METHODS_FIELDS, REQUIRED_SAMPLE_MIP_DNA_FIELDS, REQUIRED_SAMPLE_TIMESTAMP_FIELDS, - Pipeline, + Workflow, ) from cg.constants.scout import MIP_CASE_TAGS from cg.meta.report.field_validators import get_million_read_pairs @@ -125,7 +125,7 @@ def get_sample_metadata_required_fields(case: CaseModel) -> dict: def get_template_name(self) -> str: """Return template name to render the delivery report.""" - return Pipeline.MIP_DNA + "_report.html" + return Workflow.MIP_DNA + "_report.html" def get_upload_case_tags(self) -> dict: """Return MIP DNA upload case tags.""" diff --git a/cg/meta/report/report_api.py b/cg/meta/report/report_api.py index c69d3b8689..19c115976c 100644 --- a/cg/meta/report/report_api.py +++ b/cg/meta/report/report_api.py @@ -9,7 +9,7 @@ from jinja2 import Environment, PackageLoader, Template, select_autoescape from sqlalchemy.orm import Query -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import MAX_ITEMS_TO_RETRIEVE, FileFormat from cg.constants.housekeeper_tags import HK_DELIVERY_REPORT_TAG from cg.exc import DeliveryReportError @@ -126,7 +126,7 @@ def render_delivery_report(self, report_data: dict) -> str: template: Template = env.get_template(self.get_template_name()) return template.render(**report_data) - def get_cases_without_delivery_report(self, pipeline: Pipeline) -> list[Case]: + def get_cases_without_delivery_report(self, pipeline: Workflow) -> list[Case]: """Returns a list of cases that has been stored and need a delivery report.""" stored_cases: list[Case] = [] analyses: Query = self.status_db.analyses_to_delivery_report(pipeline=pipeline)[ @@ -147,7 +147,7 @@ def get_cases_without_delivery_report(self, pipeline: Pipeline) -> list[Case]: ) return stored_cases - def get_cases_without_uploaded_delivery_report(self, pipeline: Pipeline) -> list[Case]: + def get_cases_without_uploaded_delivery_report(self, pipeline: Workflow) -> list[Case]: """Returns a list of cases that need a delivery report to be uploaded.""" analyses: Query = self.status_db.analyses_to_upload_delivery_reports(pipeline=pipeline)[ :MAX_ITEMS_TO_RETRIEVE diff --git a/cg/meta/report/rnafusion.py b/cg/meta/report/rnafusion.py index 5ccb7418e7..4fe7086a4f 100644 
--- a/cg/meta/report/rnafusion.py +++ b/cg/meta/report/rnafusion.py @@ -10,7 +10,7 @@ REQUIRED_SAMPLE_METHODS_FIELDS, REQUIRED_SAMPLE_RNAFUSION_FIELDS, REQUIRED_SAMPLE_TIMESTAMP_FIELDS, - Pipeline, + Workflow, ) from cg.constants.constants import GenomeVersion from cg.constants.scout import RNAFUSION_CASE_TAGS @@ -86,7 +86,7 @@ def is_report_accredited( def get_template_name(self) -> str: """Return template name to render the delivery report.""" - return Pipeline.RNAFUSION + "_report.html" + return Workflow.RNAFUSION + "_report.html" def get_required_fields(self, case: CaseModel) -> dict: """Return dictionary with the delivery report required fields for Rnafusion.""" diff --git a/cg/meta/rsync/rsync_api.py b/cg/meta/rsync/rsync_api.py index 066bd483db..f35292c962 100644 --- a/cg/meta/rsync/rsync_api.py +++ b/cg/meta/rsync/rsync_api.py @@ -8,7 +8,7 @@ from cg.apps.slurm.slurm_api import SlurmAPI from cg.apps.tb import TrailblazerAPI -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import FileFormat from cg.constants.delivery import INBOX_NAME from cg.constants.priority import SlurmAccount, SlurmQos @@ -35,7 +35,7 @@ def __init__(self, config: CGConfig): self.account: str = config.data_delivery.account self.log_dir: Path = Path(config.data_delivery.base_path) self.mail_user: str = config.data_delivery.mail_user - self.pipeline: str = Pipeline.RSYNC + self.pipeline: str = Workflow.RSYNC @property def slurm_quality_of_service(self) -> str: @@ -136,7 +136,7 @@ def add_to_trailblazer_api( out_dir=self.log_dir.as_posix(), slurm_quality_of_service=self.slurm_quality_of_service, email=self.mail_user, - data_analysis=Pipeline.RSYNC, + data_analysis=Workflow.RSYNC, ticket=ticket, ) @@ -231,7 +231,7 @@ def run_rsync_on_slurm(self, ticket: str, dry_run: bool) -> int: source_and_destination_paths: dict[str, Path] = self.get_source_and_destination_paths( ticket=ticket, customer_internal_id=customer_internal_id ) - if cases[0].data_analysis == Pipeline.MUTANT: + if cases[0].data_analysis == Workflow.MUTANT: LOG.info("Delivering report for SARS-COV-2 analysis") commands = COVID_RSYNC.format( source_path=source_and_destination_paths["delivery_source_path"], diff --git a/cg/meta/upload/gt.py b/cg/meta/upload/gt.py index 864fc029dd..8a884090ad 100644 --- a/cg/meta/upload/gt.py +++ b/cg/meta/upload/gt.py @@ -5,7 +5,7 @@ from cg.apps.gt import GenotypeAPI from cg.apps.housekeeper.hk import HousekeeperAPI -from cg.constants.constants import FileFormat, Pipeline, PrepCategory +from cg.constants.constants import FileFormat, PrepCategory, Workflow from cg.constants.housekeeper_tags import HkMipAnalysisTag from cg.constants.subject import Sex from cg.io.controller import ReadFile @@ -46,14 +46,14 @@ def data(self, analysis_obj: Analysis) -> dict: hk_version = self.hk.last_version(case_id) hk_bcf = self.get_bcf_file(hk_version) data = {"bcf": hk_bcf.full_path} - if analysis_obj.pipeline in [Pipeline.BALSAMIC, Pipeline.BALSAMIC_UMI]: + if analysis_obj.pipeline in [Workflow.BALSAMIC, Workflow.BALSAMIC_UMI]: data["samples_sex"] = self._get_samples_sex_balsamic(case_obj=analysis_obj.case) - elif analysis_obj.pipeline == Pipeline.MIP_DNA: + elif analysis_obj.pipeline == Workflow.MIP_DNA: data["samples_sex"] = self._get_samples_sex_mip( case_obj=analysis_obj.case, hk_version=hk_version ) else: - raise ValueError(f"Pipeline {analysis_obj.pipeline} does not support Genotype upload") + raise ValueError(f"Workflow {analysis_obj.pipeline} does not support Genotype upload") return 
data def _get_samples_sex_mip(self, case_obj: Case, hk_version: Version) -> dict: diff --git a/cg/meta/upload/nipt/nipt.py b/cg/meta/upload/nipt/nipt.py index 3852ed132c..f6cd05eb39 100644 --- a/cg/meta/upload/nipt/nipt.py +++ b/cg/meta/upload/nipt/nipt.py @@ -11,7 +11,7 @@ from cg.apps.housekeeper.hk import HousekeeperAPI from cg.apps.tb import TrailblazerAPI -from cg.constants import Pipeline +from cg.constants import Workflow from cg.exc import HousekeeperFileMissingError, StatinaAPIHTTPError from cg.meta.upload.nipt.models import FlowCellQ30AndReads, StatinaUploadFiles from cg.models.cg_config import CGConfig @@ -105,7 +105,7 @@ def get_results_file_path(self, hk_results_file: str) -> Path: def get_all_upload_analyses(self) -> list[Analysis]: """Gets all nipt analyses that are ready to be uploaded""" - return self.status_db.get_latest_analysis_to_upload_for_pipeline(pipeline=Pipeline.FLUFFY) + return self.status_db.get_latest_analysis_to_upload_for_pipeline(pipeline=Workflow.FLUFFY) def upload_to_ftp_server(self, results_file: Path) -> None: """Upload the result file to the ftp server""" diff --git a/cg/meta/upload/scout/uploadscoutapi.py b/cg/meta/upload/scout/uploadscoutapi.py index 480be8c652..63d0d9be11 100644 --- a/cg/meta/upload/scout/uploadscoutapi.py +++ b/cg/meta/upload/scout/uploadscoutapi.py @@ -10,7 +10,7 @@ from cg.apps.lims import LimsAPI from cg.apps.madeline.api import MadelineAPI from cg.apps.scout.scoutapi import ScoutAPI -from cg.constants import HK_MULTIQC_HTML_TAG, Pipeline +from cg.constants import HK_MULTIQC_HTML_TAG, Workflow from cg.constants.constants import FileFormat, PrepCategory from cg.constants.scout import ScoutCustomCaseReportTags from cg.exc import CgDataError, HousekeeperBundleVersionMissingError @@ -115,10 +115,10 @@ def add_scout_config_to_hk( return file_obj def get_multiqc_html_report( - self, case_id: str, pipeline: Pipeline + self, case_id: str, pipeline: Workflow ) -> tuple[ScoutCustomCaseReportTags, File | None]: """Return a multiqc report for a case in Housekeeper.""" - if pipeline == Pipeline.MIP_RNA: + if pipeline == Workflow.MIP_RNA: return ( ScoutCustomCaseReportTags.MULTIQC_RNA, self.housekeeper.files(bundle=case_id, tags=HK_MULTIQC_HTML_TAG).first(), @@ -352,27 +352,27 @@ def upload_rna_junctions_to_scout(self, dry_run: bool, case_id: str) -> None: def get_config_builder(self, analysis, hk_version) -> ScoutConfigBuilder: config_builders = { - Pipeline.BALSAMIC: BalsamicConfigBuilder( + Workflow.BALSAMIC: BalsamicConfigBuilder( hk_version_obj=hk_version, analysis_obj=analysis, lims_api=self.lims ), - Pipeline.BALSAMIC_UMI: BalsamicUmiConfigBuilder( + Workflow.BALSAMIC_UMI: BalsamicUmiConfigBuilder( hk_version_obj=hk_version, analysis_obj=analysis, lims_api=self.lims ), - Pipeline.MIP_DNA: MipConfigBuilder( + Workflow.MIP_DNA: MipConfigBuilder( hk_version_obj=hk_version, analysis_obj=analysis, mip_analysis_api=self.mip_analysis_api, lims_api=self.lims, madeline_api=self.madeline_api, ), - Pipeline.MIP_RNA: MipConfigBuilder( + Workflow.MIP_RNA: MipConfigBuilder( hk_version_obj=hk_version, analysis_obj=analysis, mip_analysis_api=self.mip_analysis_api, lims_api=self.lims, madeline_api=self.madeline_api, ), - Pipeline.RNAFUSION: RnafusionConfigBuilder( + Workflow.RNAFUSION: RnafusionConfigBuilder( hk_version_obj=hk_version, analysis_obj=analysis, lims_api=self.lims ), } @@ -438,9 +438,9 @@ def filter_cases_related_to_dna_sample( if ( case.data_analysis in [ - Pipeline.MIP_DNA, - Pipeline.BALSAMIC, - Pipeline.BALSAMIC_UMI, + Workflow.MIP_DNA, 
+ Workflow.BALSAMIC, + Workflow.BALSAMIC_UMI, ] and case.customer in collaborators ): diff --git a/cg/meta/workflow/analysis.py b/cg/meta/workflow/analysis.py index c322298c2a..26f8d8fbb2 100644 --- a/cg/meta/workflow/analysis.py +++ b/cg/meta/workflow/analysis.py @@ -9,7 +9,7 @@ from housekeeper.store.models import Bundle, Version from cg.apps.environ import environ_email -from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Pipeline, Priority, SequencingFileTag +from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Priority, SequencingFileTag, Workflow from cg.constants.constants import ( AnalysisType, CaseActions, @@ -46,7 +46,7 @@ class AnalysisAPI(MetaAPI): Parent class containing all methods that are either shared or overridden by other workflow APIs """ - def __init__(self, pipeline: Pipeline, config: CGConfig): + def __init__(self, pipeline: Workflow, config: CGConfig): super().__init__(config=config) self.pipeline = pipeline self._process = None diff --git a/cg/meta/workflow/balsamic.py b/cg/meta/workflow/balsamic.py index 38569e4463..675917ff2f 100644 --- a/cg/meta/workflow/balsamic.py +++ b/cg/meta/workflow/balsamic.py @@ -6,7 +6,7 @@ from housekeeper.store.models import File, Version from pydantic.v1 import EmailStr, ValidationError -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import FileFormat, SampleType from cg.constants.housekeeper_tags import BalsamicAnalysisTag from cg.constants.observations import ObservationsFileWildcards @@ -24,7 +24,6 @@ BalsamicWGSQCMetrics, ) from cg.models.cg_config import CGConfig -from cg.models.fastq import FastqFileMeta from cg.store.models import Case, CaseSample, Sample from cg.utils import Process from cg.utils.utils import build_command_from_dict, get_string_from_list_by_pattern @@ -42,7 +41,7 @@ class BalsamicAnalysisAPI(AnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.BALSAMIC, + pipeline: Workflow = Workflow.BALSAMIC, ): super().__init__(config=config, pipeline=pipeline) self.account: str = config.balsamic.slurm.account diff --git a/cg/meta/workflow/balsamic_pon.py b/cg/meta/workflow/balsamic_pon.py index d0abebbfaf..498f2eaf26 100644 --- a/cg/meta/workflow/balsamic_pon.py +++ b/cg/meta/workflow/balsamic_pon.py @@ -3,7 +3,7 @@ import logging from pathlib import Path -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.exc import BalsamicStartError from cg.meta.workflow.balsamic import BalsamicAnalysisAPI from cg.models.cg_config import CGConfig @@ -19,7 +19,7 @@ class BalsamicPonAnalysisAPI(BalsamicAnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.BALSAMIC_PON, + pipeline: Workflow = Workflow.BALSAMIC_PON, ): super().__init__(config=config, pipeline=pipeline) diff --git a/cg/meta/workflow/balsamic_qc.py b/cg/meta/workflow/balsamic_qc.py index 8bf9a41825..2dd274c332 100644 --- a/cg/meta/workflow/balsamic_qc.py +++ b/cg/meta/workflow/balsamic_qc.py @@ -2,7 +2,7 @@ import logging -from cg.constants import Pipeline +from cg.constants import Workflow from cg.meta.workflow.balsamic import BalsamicAnalysisAPI from cg.models.cg_config import CGConfig @@ -16,6 +16,6 @@ class BalsamicQCAnalysisAPI(BalsamicAnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.BALSAMIC_QC, + pipeline: Workflow = Workflow.BALSAMIC_QC, ): super().__init__(config=config, pipeline=pipeline) diff --git a/cg/meta/workflow/balsamic_umi.py b/cg/meta/workflow/balsamic_umi.py index 
b8554bbbee..d6fcaded8e 100644 --- a/cg/meta/workflow/balsamic_umi.py +++ b/cg/meta/workflow/balsamic_umi.py @@ -2,7 +2,7 @@ import logging -from cg.constants import Pipeline +from cg.constants import Workflow from cg.meta.workflow.balsamic import BalsamicAnalysisAPI from cg.models.cg_config import CGConfig @@ -16,6 +16,6 @@ class BalsamicUmiAnalysisAPI(BalsamicAnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.BALSAMIC_UMI, + pipeline: Workflow = Workflow.BALSAMIC_UMI, ): super().__init__(config=config, pipeline=pipeline) diff --git a/cg/meta/workflow/fluffy.py b/cg/meta/workflow/fluffy.py index f805c32140..666050107a 100644 --- a/cg/meta/workflow/fluffy.py +++ b/cg/meta/workflow/fluffy.py @@ -7,9 +7,11 @@ from pydantic import BaseModel from sqlalchemy.orm import Query -from cg.apps.demultiplex.sample_sheet.read_sample_sheet import get_sample_sheet_from_file +from cg.apps.demultiplex.sample_sheet.read_sample_sheet import ( + get_sample_sheet_from_file, +) from cg.apps.demultiplex.sample_sheet.sample_sheet_models import SampleSheet -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import FileFormat from cg.io.controller import WriteFile from cg.meta.workflow.analysis import AnalysisAPI @@ -72,7 +74,7 @@ class FluffyAnalysisAPI(AnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.FLUFFY, + pipeline: Workflow = Workflow.FLUFFY, ): self.root_dir = Path(config.fluffy.root_dir) LOG.info("Set root dir to %s", config.fluffy.root_dir) diff --git a/cg/meta/workflow/microsalt/microsalt.py b/cg/meta/workflow/microsalt/microsalt.py index 7532b4308b..150db3a0be 100644 --- a/cg/meta/workflow/microsalt/microsalt.py +++ b/cg/meta/workflow/microsalt/microsalt.py @@ -8,10 +8,10 @@ import click -from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Pipeline, Priority +from cg.constants import EXIT_FAIL, EXIT_SUCCESS, Priority, Workflow from cg.constants.constants import FileExtensions from cg.constants.tb import AnalysisStatus -from cg.exc import CgDataError, MissingAnalysisDir +from cg.exc import CgDataError from cg.meta.workflow.analysis import AnalysisAPI from cg.meta.workflow.fastq import MicrosaltFastqHandler from cg.meta.workflow.microsalt.quality_controller import QualityController @@ -26,7 +26,7 @@ class MicrosaltAnalysisAPI(AnalysisAPI): """API to manage Microsalt Analyses""" - def __init__(self, config: CGConfig, pipeline: Pipeline = Pipeline.MICROSALT): + def __init__(self, config: CGConfig, pipeline: Workflow = Workflow.MICROSALT): super().__init__(pipeline, config) self.root_dir = config.microsalt.root self.queries_path = config.microsalt.queries_path diff --git a/cg/meta/workflow/mip.py b/cg/meta/workflow/mip.py index 50aee67d54..14bf80446d 100644 --- a/cg/meta/workflow/mip.py +++ b/cg/meta/workflow/mip.py @@ -5,7 +5,7 @@ from pydantic.v1 import ValidationError from cg.apps.mip.confighandler import ConfigHandler -from cg.constants import FileExtensions, GenePanelMasterList, Pipeline +from cg.constants import FileExtensions, GenePanelMasterList, Workflow from cg.constants.constants import FileFormat from cg.constants.housekeeper_tags import HkMipAnalysisTag from cg.exc import CgError @@ -39,7 +39,7 @@ class MipAnalysisAPI(AnalysisAPI): """The workflow is accessed through Trailblazer but cg provides additional conventions and hooks into the status database that makes managing analyses simpler""" - def __init__(self, config: CGConfig, pipeline: Pipeline): + def __init__(self, config: 
CGConfig, pipeline: Workflow): super().__init__(pipeline, config) @property diff --git a/cg/meta/workflow/mip_dna.py b/cg/meta/workflow/mip_dna.py index acca32dfb8..e611d3e35e 100644 --- a/cg/meta/workflow/mip_dna.py +++ b/cg/meta/workflow/mip_dna.py @@ -1,6 +1,4 @@ -from pathlib import Path - -from cg.constants import DEFAULT_CAPTURE_KIT, Pipeline +from cg.constants import DEFAULT_CAPTURE_KIT, Workflow from cg.constants.constants import AnalysisType from cg.constants.gene_panel import GENOME_BUILD_37 from cg.constants.pedigree import Pedigree @@ -11,7 +9,7 @@ class MipDNAAnalysisAPI(MipAnalysisAPI): - def __init__(self, config: CGConfig, pipeline: Pipeline = Pipeline.MIP_DNA): + def __init__(self, config: CGConfig, pipeline: Workflow = Workflow.MIP_DNA): super().__init__(config, pipeline) @property diff --git a/cg/meta/workflow/mip_rna.py b/cg/meta/workflow/mip_rna.py index 90f938cb7e..08f11dda21 100644 --- a/cg/meta/workflow/mip_rna.py +++ b/cg/meta/workflow/mip_rna.py @@ -1,4 +1,4 @@ -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.gene_panel import GENOME_BUILD_38 from cg.constants.pedigree import Pedigree from cg.meta.workflow.mip import MipAnalysisAPI @@ -7,7 +7,7 @@ class MipRNAAnalysisAPI(MipAnalysisAPI): - def __init__(self, config: CGConfig, pipeline: Pipeline = Pipeline.MIP_RNA): + def __init__(self, config: CGConfig, pipeline: Workflow = Workflow.MIP_RNA): super().__init__(config, pipeline) @property diff --git a/cg/meta/workflow/mutant.py b/cg/meta/workflow/mutant.py index 4823e1b384..ac179e44ec 100644 --- a/cg/meta/workflow/mutant.py +++ b/cg/meta/workflow/mutant.py @@ -2,7 +2,7 @@ import shutil from pathlib import Path -from cg.constants import Pipeline, SequencingFileTag +from cg.constants import SequencingFileTag, Workflow from cg.constants.constants import FileFormat from cg.io.controller import WriteFile from cg.meta.workflow.analysis import AnalysisAPI @@ -19,7 +19,7 @@ class MutantAnalysisAPI(AnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.MUTANT, + pipeline: Workflow = Workflow.MUTANT, ): super().__init__(config=config, pipeline=pipeline) self.root_dir = config.mutant.root diff --git a/cg/meta/workflow/nf_analysis.py b/cg/meta/workflow/nf_analysis.py index 146603fabe..cde1f311b4 100644 --- a/cg/meta/workflow/nf_analysis.py +++ b/cg/meta/workflow/nf_analysis.py @@ -3,26 +3,24 @@ from pathlib import Path from typing import Any -from cg.constants import Pipeline -from cg.constants.constants import FileExtensions, FileFormat, WorkflowManager, MultiQC +from cg.constants import Workflow +from cg.constants.constants import FileExtensions, FileFormat, MultiQC, WorkflowManager from cg.constants.nextflow import NFX_WORK_DIR +from cg.constants.tb import AnalysisStatus +from cg.exc import CgError, MetricsQCError +from cg.io.controller import ReadFile, WriteFile from cg.io.yaml import write_yaml_nextflow_style from cg.meta.workflow.analysis import AnalysisAPI from cg.meta.workflow.nf_handlers import NextflowHandler, NfTowerHandler from cg.models.cg_config import CGConfig -from cg.models.fastq import FastqFileMeta -from cg.models.nf_analysis import FileDeliverable, PipelineDeliverables -from cg.models.rnafusion.rnafusion import CommandArgs -from cg.utils import Process from cg.models.deliverables.metric_deliverables import ( MetricsBase, -) -from cg.io.controller import ReadFile, WriteFile -from cg.models.deliverables.metric_deliverables import ( MetricsDeliverablesCondition, ) -from cg.exc import CgError, 
MetricsQCError -from cg.constants.tb import AnalysisStatus +from cg.models.fastq import FastqFileMeta +from cg.models.nf_analysis import FileDeliverable, PipelineDeliverables +from cg.models.rnafusion.rnafusion import CommandArgs +from cg.utils import Process LOG = logging.getLogger(__name__) @@ -30,9 +28,9 @@ class NfAnalysisAPI(AnalysisAPI): """Parent class for handling NF-core analyses.""" - def __init__(self, config: CGConfig, pipeline: Pipeline): + def __init__(self, config: CGConfig, pipeline: Workflow): super().__init__(config=config, pipeline=pipeline) - self.pipeline: Pipeline = pipeline + self.pipeline: Workflow = pipeline self.root_dir: str | None = None self.nfcore_pipeline_path: str | None = None self.references: str | None = None @@ -221,7 +219,7 @@ def _run_analysis_with_nextflow( conda_binary=self.conda_binary, launch_directory=self.get_case_path(case_id=case_id), ) - LOG.info("Pipeline will be executed using Nextflow") + LOG.info("Workflow will be executed using Nextflow") parameters: list[str] = NextflowHandler.get_nextflow_run_parameters( case_id=case_id, pipeline_path=self.nfcore_pipeline_path, @@ -248,13 +246,13 @@ def _run_analysis_with_tower( self, case_id: str, command_args: CommandArgs, dry_run: bool ) -> None: """Run analysis with given options using NF-Tower.""" - LOG.info("Pipeline will be executed using Tower") + LOG.info("Workflow will be executed using Tower") if command_args.resume: from_tower_id: int = command_args.id or NfTowerHandler.get_last_tower_id( case_id=case_id, trailblazer_config=self.get_job_ids_path(case_id=case_id), ) - LOG.info(f"Pipeline will be resumed from run with Tower id: {from_tower_id}.") + LOG.info(f"Workflow will be resumed from run with Tower id: {from_tower_id}.") parameters: list[str] = NfTowerHandler.get_tower_relaunch_parameters( from_tower_id=from_tower_id, command_args=command_args.dict() ) diff --git a/cg/meta/workflow/nf_handlers.py b/cg/meta/workflow/nf_handlers.py index 2190a66f16..38fca3f9e0 100644 --- a/cg/meta/workflow/nf_handlers.py +++ b/cg/meta/workflow/nf_handlers.py @@ -81,7 +81,7 @@ def get_tower_id(stdout_lines: Iterable) -> str: """Parse the stdout and return a workflow id. An example of the output to parse is: Case exists in status db Running RNAFUSION analysis for - Pipeline will be executed using tower + Workflow will be executed using tower Running command Workflow 1uxZE9JM7Tl58r submitted at [] workspace. 
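Note for reviewers: the substitution is drop-in because Workflow, like the Pipeline enum it replaces, is string-valued (a StrEnum), so its members already behave as plain str objects. This is also what allows the explicit str(...) casts to be removed elsewhere in this patch, for example Workflow.RNAFUSION + "_report.html" in the delivery report code above and the dropped str(Pipeline.MIP_DNA) comparisons in test_excel_orderform_parser.py below. A minimal sketch of that behavior; the member values here are illustrative assumptions, the real enum lives in cg/constants/constants.py:

    # Sketch only: member values are assumed for illustration and are not
    # copied from cg/constants/constants.py.
    from enum import StrEnum


    class Workflow(StrEnum):
        BALSAMIC = "balsamic"
        MIP_DNA = "mip-dna"
        RNAFUSION = "rnafusion"


    # StrEnum members are real str instances, so no str() cast is needed:
    assert Workflow.RNAFUSION + "_report.html" == "rnafusion_report.html"
    assert Workflow.MIP_DNA == "mip-dna"
    # f-strings render the member value, not its name:
    assert f"{Workflow.BALSAMIC}" == "balsamic"

String-valued members also keep comparisons against stored strings (e.g. cases[0].data_analysis == Workflow.MUTANT) and the types.Enum(*list(Workflow)) column definitions in cg/store/models.py working unchanged.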
diff --git a/cg/meta/workflow/raredisease.py b/cg/meta/workflow/raredisease.py index dea36a9119..3cacc4febb 100644 --- a/cg/meta/workflow/raredisease.py +++ b/cg/meta/workflow/raredisease.py @@ -3,7 +3,7 @@ import logging from pathlib import Path -from cg.constants import GenePanelMasterList, Pipeline +from cg.constants import GenePanelMasterList, Workflow from cg.constants.gene_panel import GENOME_BUILD_37 from cg.meta.workflow.analysis import add_gene_panel_combo from cg.meta.workflow.nf_analysis import NfAnalysisAPI @@ -19,7 +19,7 @@ class RarediseaseAnalysisAPI(NfAnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.RAREDISEASE, + pipeline: Workflow = Workflow.RAREDISEASE, ): super().__init__(config=config, pipeline=pipeline) diff --git a/cg/meta/workflow/rnafusion.py b/cg/meta/workflow/rnafusion.py index a24b8cf2b6..79702efc55 100644 --- a/cg/meta/workflow/rnafusion.py +++ b/cg/meta/workflow/rnafusion.py @@ -5,17 +5,15 @@ from typing import Any from cg import resources -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import FileFormat, Strandedness from cg.constants.nf_analysis import RNAFUSION_METRIC_CONDITIONS from cg.exc import MissingMetrics from cg.io.controller import ReadFile +from cg.io.json import read_json from cg.meta.workflow.nf_analysis import NfAnalysisAPI from cg.models.cg_config import CGConfig -from cg.models.deliverables.metric_deliverables import ( - MetricsBase, - MultiqcDataJson, -) +from cg.models.deliverables.metric_deliverables import MetricsBase, MultiqcDataJson from cg.models.fastq import FastqFileMeta from cg.models.nf_analysis import PipelineDeliverables from cg.models.rnafusion.rnafusion import ( @@ -24,7 +22,6 @@ RnafusionSampleSheetEntry, ) from cg.store.models import Case, Sample -from cg.io.json import read_json LOG = logging.getLogger(__name__) @@ -36,7 +33,7 @@ class RnafusionAnalysisAPI(NfAnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.RNAFUSION, + pipeline: Workflow = Workflow.RNAFUSION, ): super().__init__(config=config, pipeline=pipeline) self.root_dir: str = config.rnafusion.root diff --git a/cg/meta/workflow/taxprofiler.py b/cg/meta/workflow/taxprofiler.py index e81e6c108a..106452ea7f 100644 --- a/cg/meta/workflow/taxprofiler.py +++ b/cg/meta/workflow/taxprofiler.py @@ -4,21 +4,18 @@ from pathlib import Path from typing import Any -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.sequencing import SequencingPlatform +from cg.io.json import read_json from cg.meta.workflow.nf_analysis import NfAnalysisAPI from cg.models.cg_config import CGConfig +from cg.models.deliverables.metric_deliverables import MetricsBase, MultiqcDataJson from cg.models.fastq import FastqFileMeta from cg.models.taxprofiler.taxprofiler import ( TaxprofilerParameters, TaxprofilerSampleSheetEntry, ) from cg.store.models import Case, Sample -from cg.models.deliverables.metric_deliverables import ( - MetricsBase, - MultiqcDataJson, -) -from cg.io.json import read_json LOG = logging.getLogger(__name__) @@ -30,7 +27,7 @@ class TaxprofilerAnalysisAPI(NfAnalysisAPI): def __init__( self, config: CGConfig, - pipeline: Pipeline = Pipeline.TAXPROFILER, + pipeline: Workflow = Workflow.TAXPROFILER, ): super().__init__(config=config, pipeline=pipeline) self.root_dir: str = config.taxprofiler.root diff --git a/cg/models/orders/constants.py b/cg/models/orders/constants.py index 2f2945166d..8e722f9986 100644 --- 
a/cg/models/orders/constants.py +++ b/cg/models/orders/constants.py @@ -1,20 +1,20 @@ from enum import StrEnum -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow class OrderType(StrEnum): - BALSAMIC: str = Pipeline.BALSAMIC - BALSAMIC_QC: str = Pipeline.BALSAMIC_QC - BALSAMIC_UMI: str = Pipeline.BALSAMIC_UMI - FASTQ: str = Pipeline.FASTQ - FLUFFY: str = Pipeline.FLUFFY + BALSAMIC: str = Workflow.BALSAMIC + BALSAMIC_QC: str = Workflow.BALSAMIC_QC + BALSAMIC_UMI: str = Workflow.BALSAMIC_UMI + FASTQ: str = Workflow.FASTQ + FLUFFY: str = Workflow.FLUFFY METAGENOME: str = "metagenome" - MICROSALT: str = Pipeline.MICROSALT - MIP_DNA: str = Pipeline.MIP_DNA - MIP_RNA: str = Pipeline.MIP_RNA + MICROSALT: str = Workflow.MICROSALT + MIP_DNA: str = Workflow.MIP_DNA + MIP_RNA: str = Workflow.MIP_RNA RML: str = "rml" - RNAFUSION: str = Pipeline.RNAFUSION + RNAFUSION: str = Workflow.RNAFUSION SARS_COV_2: str = "sars-cov-2" diff --git a/cg/models/orders/json_sample.py b/cg/models/orders/json_sample.py index 2d4440a38b..572784c51f 100644 --- a/cg/models/orders/json_sample.py +++ b/cg/models/orders/json_sample.py @@ -1,7 +1,7 @@ from pydantic import BeforeValidator, constr from typing_extensions import Annotated -from cg.constants import DataDelivery, Pipeline +from cg.constants import DataDelivery, Workflow from cg.models.orders.sample_base import OrderSample from cg.models.orders.validators.json_sample_validators import convert_well, join_list @@ -12,7 +12,7 @@ class JsonSample(OrderSample): concentration_ng_ul: str | None = None concentration_sample: str | None = None control: str | None = None - data_analysis: Pipeline = Pipeline.MIP_DNA + data_analysis: Workflow = Workflow.MIP_DNA data_delivery: DataDelivery = DataDelivery.SCOUT index: str | None = None panels: list[str] | None = None diff --git a/cg/models/orders/sample_base.py b/cg/models/orders/sample_base.py index 073f43bfe7..eb9219adfc 100644 --- a/cg/models/orders/sample_base.py +++ b/cg/models/orders/sample_base.py @@ -3,7 +3,7 @@ from pydantic import BaseModel, BeforeValidator, ConfigDict, constr from typing_extensions import Annotated -from cg.constants import DataDelivery, Pipeline +from cg.constants import DataDelivery, Workflow from cg.models.orders.validators.sample_base_validators import snake_case from cg.store.models import Application, Case, Customer, Pool, Sample @@ -60,7 +60,7 @@ class OrderSample(BaseModel): control: str | None = None customer: constr(max_length=Customer.internal_id.property.columns[0].type.length) | None = None custom_index: str | None = None - data_analysis: Pipeline + data_analysis: Workflow data_delivery: DataDelivery elution_buffer: str | None = None extraction_method: str | None = None diff --git a/cg/models/orders/samples.py b/cg/models/orders/samples.py index 6fee04fec0..681c9ccf21 100644 --- a/cg/models/orders/samples.py +++ b/cg/models/orders/samples.py @@ -1,7 +1,7 @@ from pydantic.v1 import BaseModel, constr, validator from cg.constants import DataDelivery -from cg.constants.constants import GenomeVersion, Pipeline +from cg.constants.constants import GenomeVersion, Workflow from cg.models.orders.order import OrderType from cg.models.orders.sample_base import ( NAME_PATTERN, @@ -33,7 +33,7 @@ class OrderInSample(BaseModel): application: constr(max_length=Application.tag.property.columns[0].type.length) comment: constr(max_length=Sample.comment.property.columns[0].type.length) | None skip_reception_control: bool | None = None - data_analysis: Pipeline + 
data_analysis: Workflow data_delivery: DataDelivery name: constr( regex=NAME_PATTERN, diff --git a/cg/models/report/validators.py b/cg/models/report/validators.py index b1144ce646..bed6a8797c 100644 --- a/cg/models/report/validators.py +++ b/cg/models/report/validators.py @@ -13,7 +13,7 @@ REPORT_GENDER, YES_FIELD, ) -from cg.constants.constants import Pipeline, PrepCategory +from cg.constants.constants import PrepCategory, Workflow from cg.constants.subject import Sex from cg.models.orders.constants import OrderType @@ -72,6 +72,6 @@ def get_prep_category_as_string(prep_category: PrepCategory | None) -> str: def get_analysis_type_as_string(analysis_type: str | None, info: ValidationInfo) -> str: """Return the analysis type as an accepted string value for the delivery report.""" - if analysis_type and Pipeline.BALSAMIC in info.data.get("pipeline"): + if analysis_type and Workflow.BALSAMIC in info.data.get("pipeline"): analysis_type: str = BALSAMIC_ANALYSIS_TYPE.get(analysis_type) return get_report_string(analysis_type) diff --git a/cg/server/admin.py b/cg/server/admin.py index df0335fcae..0de811dafb 100644 --- a/cg/server/admin.py +++ b/cg/server/admin.py @@ -9,7 +9,7 @@ from flask_dance.contrib.google import google from markupsafe import Markup -from cg.constants.constants import NG_UL_SUFFIX, CaseActions, DataDelivery, Pipeline +from cg.constants.constants import NG_UL_SUFFIX, CaseActions, DataDelivery, Workflow from cg.server.ext import db from cg.store.models import Sample from cg.utils.flask.enum import SelectEnumField @@ -197,7 +197,7 @@ class ApplicationLimitationsView(BaseView): column_searchable_list = ["application.tag"] column_editable_list = ["comment"] form_excluded_columns = ["created_at", "updated_at"] - form_extra_fields = {"pipeline": SelectEnumField(enum_class=Pipeline)} + form_extra_fields = {"pipeline": SelectEnumField(enum_class=Workflow)} create_modal = True edit_modal = True @@ -312,7 +312,7 @@ class CaseView(BaseView): "synopsis", ] form_extra_fields = { - "data_analysis": SelectEnumField(enum_class=Pipeline), + "data_analysis": SelectEnumField(enum_class=Workflow), "data_delivery": SelectEnumField(enum_class=DataDelivery), } @@ -437,7 +437,7 @@ class AnalysisView(BaseView): "case.internal_id", "case.name", ] - form_extra_fields = {"pipeline": SelectEnumField(enum_class=Pipeline)} + form_extra_fields = {"pipeline": SelectEnumField(enum_class=Workflow)} class OrganismView(BaseView): diff --git a/cg/server/dto/orders/orders_response.py b/cg/server/dto/orders/orders_response.py index a3379d5019..5a9cbaecc0 100644 --- a/cg/server/dto/orders/orders_response.py +++ b/cg/server/dto/orders/orders_response.py @@ -1,6 +1,6 @@ from pydantic import BaseModel -from cg.constants import Pipeline +from cg.constants import Workflow class Order(BaseModel): @@ -8,7 +8,7 @@ class Order(BaseModel): ticket_id: int order_date: str order_id: int - workflow: Pipeline + workflow: Workflow class OrdersResponse(BaseModel): diff --git a/cg/services/delivery_message/utils.py b/cg/services/delivery_message/utils.py index 869af412e7..b0ac23438b 100644 --- a/cg/services/delivery_message/utils.py +++ b/cg/services/delivery_message/utils.py @@ -1,4 +1,4 @@ -from cg.constants.constants import DataDelivery, MicrosaltAppTags, Pipeline +from cg.constants.constants import DataDelivery, MicrosaltAppTags, Workflow from cg.services.delivery_message.messages import ( AnalysisScoutMessage, CovidMessage, @@ -22,10 +22,10 @@ def get_message(case: Case) -> str: def get_message_strategy(case: Case) -> 
DeliveryMessage: - if case.data_analysis == Pipeline.MICROSALT: + if case.data_analysis == Workflow.MICROSALT: return get_microsalt_message_strategy(case) - if case.data_analysis == Pipeline.MUTANT: + if case.data_analysis == Workflow.MUTANT: return CovidMessage() message_strategy: DeliveryMessage = get_message_strategy_from_data_delivery(case) diff --git a/cg/store/crud/create.py b/cg/store/crud/create.py index d770d181d4..471a9b97f0 100644 --- a/cg/store/crud/create.py +++ b/cg/store/crud/create.py @@ -1,14 +1,13 @@ import logging from datetime import datetime -from cg.models.orders.order import OrderIn -from cg.models.orders.orderform_schema import Orderform -from cg.store.database import get_session import petname -from cg.constants import DataDelivery, FlowCellStatus, Pipeline, Priority +from cg.constants import DataDelivery, FlowCellStatus, Priority, Workflow from cg.constants.archiving import PDC_ARCHIVE_LOCATION +from cg.models.orders.order import OrderIn from cg.store.base import BaseHandler +from cg.store.database import get_session from cg.store.models import ( Analysis, Application, @@ -204,7 +203,7 @@ def add_sample( def add_case( self, - data_analysis: Pipeline, + data_analysis: Workflow, data_delivery: DataDelivery, name: str, ticket: str, @@ -268,7 +267,7 @@ def add_flow_cell( def add_analysis( self, - pipeline: Pipeline, + pipeline: Workflow, version: str = None, completed_at: datetime = None, primary: bool = False, diff --git a/cg/store/crud/read.py b/cg/store/crud/read.py index 762861f56a..a473f99c81 100644 --- a/cg/store/crud/read.py +++ b/cg/store/crud/read.py @@ -8,7 +8,7 @@ from sqlalchemy.orm import Query, Session -from cg.constants import FlowCellStatus, Pipeline +from cg.constants import FlowCellStatus, Workflow from cg.constants.constants import CaseActions, CustomerId, PrepCategory, SampleType from cg.exc import CaseNotFoundError, CgError from cg.store.base import BaseHandler @@ -132,7 +132,7 @@ def get_application_limitations_by_tag(self, tag: str) -> list[ApplicationLimita ).all() def get_application_limitation_by_tag_and_pipeline( - self, tag: str, pipeline: Pipeline + self, tag: str, pipeline: Workflow ) -> ApplicationLimitations | None: """Return an application limitation given the application tag and pipeline.""" filter_functions: list[ApplicationLimitationsFilter] = [ @@ -788,7 +788,7 @@ def verify_case_exists(self, case_internal_id: str) -> None: raise CgError LOG.info(f"Case {case_internal_id} exists in Status DB") - def get_running_cases_in_pipeline(self, pipeline: Pipeline) -> list[Case]: + def get_running_cases_in_pipeline(self, pipeline: Workflow) -> list[Case]: """Return all running cases in a pipeline.""" return apply_case_filter( filter_functions=[CaseFilter.FILTER_WITH_PIPELINE, CaseFilter.FILTER_IS_RUNNING], @@ -1026,7 +1026,7 @@ def get_families_with_samples(self) -> Query: return self._get_join_cases_with_samples_query() def cases_to_analyze( - self, pipeline: Pipeline = None, threshold: bool = False, limit: int = None + self, pipeline: Workflow = None, threshold: bool = False, limit: int = None ) -> list[Case]: """Returns a list of cases ready to be analyzed or set to be reanalyzed.""" case_filter_functions: list[CaseFilter] = [ @@ -1455,7 +1455,7 @@ def _is_rerun( or (samples_sequenced_at and samples_sequenced_at < case_obj.ordered_at) ) - def get_analyses_to_upload(self, pipeline: Pipeline = None) -> list[Analysis]: + def get_analyses_to_upload(self, pipeline: Workflow = None) -> list[Analysis]: """Return analyses that have not been
uploaded.""" analysis_filter_functions: list[AnalysisFilter] = [ AnalysisFilter.FILTER_WITH_PIPELINE, @@ -1471,7 +1471,7 @@ def get_analyses_to_upload(self, pipeline: Pipeline = None) -> list[Analysis]: ).all() def get_analyses_to_clean( - self, before: datetime = datetime.now(), pipeline: Pipeline = None + self, before: datetime = datetime.now(), pipeline: Workflow = None ) -> list[Analysis]: """Return analyses that haven't been cleaned.""" filter_functions: list[AnalysisFilter] = [ @@ -1491,7 +1491,7 @@ def get_analyses_to_clean( def get_analyses_for_case_and_pipeline_started_at_before( self, - pipeline: Pipeline, + pipeline: Workflow, started_at_before: datetime, case_internal_id: str, ) -> list[Analysis]: @@ -1531,7 +1531,7 @@ def get_analyses_for_case_started_at_before( ).all() def get_analyses_for_pipeline_started_at_before( - self, pipeline: Pipeline, started_at_before: datetime + self, pipeline: Workflow, started_at_before: datetime ) -> list[Analysis]: """Return all analyses for a pipeline started before a certain date.""" filter_functions: list[AnalysisFilter] = [ @@ -1553,7 +1553,7 @@ def get_analyses_started_at_before(self, started_at_before: datetime) -> list[An started_at_date=started_at_before, ).all() - def observations_to_upload(self, pipeline: Pipeline = None) -> Query: + def observations_to_upload(self, pipeline: Workflow = None) -> Query: """Return observations that have not been uploaded.""" case_filter_functions: list[CaseFilter] = [ CaseFilter.FILTER_WITH_LOQUSDB_SUPPORTED_PIPELINE, @@ -1568,7 +1568,7 @@ def observations_to_upload(self, pipeline: Pipeline = None) -> Query: filter_functions=[SampleFilter.FILTER_WITHOUT_LOQUSDB_ID], samples=records ) - def observations_uploaded(self, pipeline: Pipeline = None) -> Query: + def observations_uploaded(self, pipeline: Workflow = None) -> Query: """Return observations that have been uploaded.""" records: Query = apply_case_filter( filter_functions=[CaseFilter.FILTER_WITH_LOQUSDB_SUPPORTED_PIPELINE], @@ -1583,7 +1583,7 @@ def observations_uploaded(self, pipeline: Pipeline = None) -> Query: def get_analyses(self) -> list[Analysis]: return self._get_query(table=Analysis).all() - def get_analyses_to_deliver_for_pipeline(self, pipeline: Pipeline = None) -> list[Analysis]: + def get_analyses_to_deliver_for_pipeline(self, pipeline: Workflow = None) -> list[Analysis]: """Return analyses that have been uploaded but not delivered.""" analyses: Query = apply_sample_filter( samples=self._get_join_analysis_sample_family_query(), @@ -1598,7 +1598,7 @@ def get_analyses_to_deliver_for_pipeline(self, pipeline: Pipeline = None) -> lis analyses=analyses, filter_functions=filter_functions, pipeline=pipeline ).all() - def analyses_to_delivery_report(self, pipeline: Pipeline = None) -> Query: + def analyses_to_delivery_report(self, pipeline: Workflow = None) -> Query: """Return analyses that need a delivery report to be regenerated.""" records: Query = apply_case_filter( filter_functions=[CaseFilter.FILTER_REPORT_SUPPORTED], @@ -1615,7 +1615,7 @@ def analyses_to_delivery_report(self, pipeline: Pipeline = None) -> Query: filter_functions=analysis_filter_functions, analyses=records, pipeline=pipeline ) - def analyses_to_upload_delivery_reports(self, pipeline: Pipeline = None) -> Query: + def analyses_to_upload_delivery_reports(self, pipeline: Workflow = None) -> Query: """Return analyses that need a delivery report to be uploaded.""" records: Query = apply_case_filter( filter_functions=[CaseFilter.FILTER_WITH_SCOUT_DELIVERY], diff --git 
a/cg/store/filters/status_analysis_filters.py b/cg/store/filters/status_analysis_filters.py index f5dc158ff3..499a742c0f 100644 --- a/cg/store/filters/status_analysis_filters.py +++ b/cg/store/filters/status_analysis_filters.py @@ -5,7 +5,7 @@ from sqlalchemy.orm import Query from cg.constants import REPORT_SUPPORTED_PIPELINES -from cg.constants.constants import VALID_DATA_IN_PRODUCTION, Pipeline +from cg.constants.constants import VALID_DATA_IN_PRODUCTION, Workflow from cg.store.models import Analysis, Case @@ -14,7 +14,7 @@ def filter_valid_analyses_in_production(analyses: Query, **kwargs) -> Query: return analyses.filter(VALID_DATA_IN_PRODUCTION < Analysis.completed_at) -def filter_analyses_with_pipeline(analyses: Query, pipeline: Pipeline = None, **kwargs) -> Query: +def filter_analyses_with_pipeline(analyses: Query, pipeline: Workflow = None, **kwargs) -> Query: """Return analyses with supplied pipeline.""" return analyses.filter(Analysis.pipeline == str(pipeline)) if pipeline else analyses @@ -45,7 +45,7 @@ def filter_analyses_without_delivery_report(analyses: Query, **kwargs) -> Query: def filter_report_analyses_by_pipeline( - analyses: Query, pipeline: Pipeline = None, **kwargs + analyses: Query, pipeline: Workflow = None, **kwargs ) -> Query: """Return the delivery report related analyses associated to the provided or supported pipelines.""" return ( @@ -93,7 +93,7 @@ def filter_analysis_case_action_is_none(analyses: Query, **kwargs) -> Query: def apply_analysis_filter( filter_functions: list[Callable], analyses: Query, - pipeline: Pipeline = None, + pipeline: Workflow = None, case_entry_id: int = None, completed_at_date: datetime = None, started_at_date: datetime = None, diff --git a/cg/store/filters/status_application_limitations_filters.py b/cg/store/filters/status_application_limitations_filters.py index 526c2903f7..69f7e8cb56 100644 --- a/cg/store/filters/status_application_limitations_filters.py +++ b/cg/store/filters/status_application_limitations_filters.py @@ -3,7 +3,7 @@ from sqlalchemy.orm import Query -from cg.constants import Pipeline +from cg.constants import Workflow from cg.store.models import Application, ApplicationLimitations @@ -15,7 +15,7 @@ def filter_application_limitations_by_tag( def filter_application_limitations_by_pipeline( - application_limitations: Query, pipeline: Pipeline, **kwargs + application_limitations: Query, pipeline: Workflow, **kwargs ) -> Query: """Return application limitations by pipeline.""" return application_limitations.filter(ApplicationLimitations.pipeline == pipeline) @@ -25,7 +25,7 @@ def apply_application_limitations_filter( filter_functions: list[Callable], application_limitations: Query, tag: str = None, - pipeline: Pipeline = None, + pipeline: Workflow = None, ) -> Query: """Apply filtering functions to the application limitations queries and return filtered results.""" for filter_function in filter_functions: diff --git a/cg/store/filters/status_case_filters.py b/cg/store/filters/status_case_filters.py index da020aa42f..231523f01a 100644 --- a/cg/store/filters/status_case_filters.py +++ b/cg/store/filters/status_case_filters.py @@ -6,7 +6,7 @@ from sqlalchemy.orm import Query from cg.constants import REPORT_SUPPORTED_DATA_DELIVERY -from cg.constants.constants import CaseActions, DataDelivery, Pipeline +from cg.constants.constants import CaseActions, DataDelivery, Workflow from cg.constants.observations import ( LOQUSDB_BALSAMIC_SEQUENCING_METHODS, LOQUSDB_MIP_SEQUENCING_METHODS, @@ -121,13 +121,13 @@ def 
filter_cases_not_analysed(cases: Query, **kwargs) -> Query: return cases.filter(and_(not_analyzed_condition, not_in_progress_condition)) -def filter_cases_with_pipeline(cases: Query, pipeline: Pipeline = None, **kwargs) -> Query: +def filter_cases_with_pipeline(cases: Query, pipeline: Workflow = None, **kwargs) -> Query: """Filter cases with pipeline.""" return cases.filter(Case.data_analysis == pipeline) if pipeline else cases def filter_cases_with_loqusdb_supported_pipeline( - cases: Query, pipeline: Pipeline = None, **kwargs + cases: Query, pipeline: Workflow = None, **kwargs ) -> Query: """Filter Loqusdb related cases with pipeline.""" records: Query = ( @@ -139,12 +139,12 @@ def filter_cases_with_loqusdb_supported_pipeline( def filter_cases_with_loqusdb_supported_sequencing_method( - cases: Query, pipeline: Pipeline = None, **kwargs + cases: Query, pipeline: Workflow = None, **kwargs ) -> Query: """Filter cases with Loqusdb supported sequencing method.""" supported_sequencing_methods = { - Pipeline.MIP_DNA: LOQUSDB_MIP_SEQUENCING_METHODS, - Pipeline.BALSAMIC: LOQUSDB_BALSAMIC_SEQUENCING_METHODS, + Workflow.MIP_DNA: LOQUSDB_MIP_SEQUENCING_METHODS, + Workflow.BALSAMIC: LOQUSDB_BALSAMIC_SEQUENCING_METHODS, } return ( cases.filter(Application.prep_category.in_(supported_sequencing_methods[pipeline])) @@ -214,7 +214,7 @@ def apply_case_filter( name: str | None = None, name_search: str | None = None, order_date: datetime | None = None, - pipeline: Pipeline | None = None, + pipeline: Workflow | None = None, pipeline_search: str | None = None, priority: str | None = None, ticket_id: str | None = None, diff --git a/cg/store/models.py b/cg/store/models.py index 70562fbebf..972f2bc700 100644 --- a/cg/store/models.py +++ b/cg/store/models.py @@ -12,8 +12,8 @@ STATUS_OPTIONS, DataDelivery, FlowCellStatus, - Pipeline, Priority, + Workflow, ) from cg.constants.archiving import PDC_ARCHIVE_LOCATION from cg.constants.constants import CONTROL_OPTIONS, CaseActions, PrepCategory @@ -187,7 +187,7 @@ class ApplicationLimitations(Model): id = Column(types.Integer, primary_key=True) application_id = Column(ForeignKey(Application.id), nullable=False) - pipeline = Column(types.Enum(*list(Pipeline)), nullable=False) + pipeline = Column(types.Enum(*list(Workflow)), nullable=False) limitations = Column(types.Text) comment = Column(types.Text) created_at = Column(types.DateTime, default=dt.datetime.now) @@ -206,7 +206,7 @@ class Analysis(Model): __tablename__ = "analysis" id = Column(types.Integer, primary_key=True) - pipeline = Column(types.Enum(*list(Pipeline))) + pipeline = Column(types.Enum(*list(Workflow))) pipeline_version = Column(types.String(32)) started_at = Column(types.DateTime) completed_at = Column(types.DateTime) @@ -387,7 +387,7 @@ class Case(Model, PriorityMixin): created_at = Column(types.DateTime, default=dt.datetime.now) customer_id = Column(ForeignKey("customer.id", ondelete="CASCADE"), nullable=False) customer = orm.relationship(Customer, foreign_keys=[customer_id]) - data_analysis = Column(types.Enum(*list(Pipeline))) + data_analysis = Column(types.Enum(*list(Workflow))) data_delivery = Column(types.Enum(*list(DataDelivery))) id = Column(types.Integer, primary_key=True) internal_id = Column(types.String(32), unique=True, nullable=False) @@ -500,7 +500,7 @@ def get_delivery_arguments(self) -> set[str]: delivery_arguments: set[str] = set() requested_deliveries: list[str] = re.split("[-_]", self.data_delivery) delivery_per_pipeline_map: dict[str, str] = { - DataDelivery.FASTQ: Pipeline.FASTQ, + 
DataDelivery.FASTQ: Workflow.FASTQ, DataDelivery.ANALYSIS_FILES: self.data_analysis, } for data_delivery, pipeline in delivery_per_pipeline_map.items(): @@ -886,7 +886,7 @@ class Order(Model): customer = orm.relationship(Customer, foreign_keys=[customer_id]) order_date = Column(types.DateTime, nullable=False, default=dt.datetime.now()) ticket_id = Column(types.Integer, nullable=False, unique=True, index=True) - workflow = Column(types.Enum(*tuple(Pipeline)), nullable=False) + workflow = Column(types.Enum(*tuple(Workflow)), nullable=False) def to_dict(self): return to_dict(model_instance=self) diff --git a/poetry.lock b/poetry.lock index 305bb29d0b..0f24b4e462 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2416,4 +2416,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "1e458340cbae9afa171074c3b957e6044c14f095ba35391780ee3fc17532951f" +content-hash = "b90b719a408285c2ca434d1e6d972986c2c3c4fad55edf429eb6b193b115bfc2" diff --git a/tests/apps/orderform/test_excel_orderform_parser.py b/tests/apps/orderform/test_excel_orderform_parser.py index 3e6af3aa64..73808619da 100644 --- a/tests/apps/orderform/test_excel_orderform_parser.py +++ b/tests/apps/orderform/test_excel_orderform_parser.py @@ -1,7 +1,7 @@ from pathlib import Path from cg.apps.orderform.excel_orderform_parser import ExcelOrderformParser -from cg.constants import Pipeline +from cg.constants import Workflow from cg.models.orders.excel_sample import ExcelSample from cg.models.orders.order import OrderType from cg.models.orders.orderform_schema import Orderform @@ -166,7 +166,7 @@ def test_parse_mip_orderform(mip_orderform: str, nr_samples_mip_orderform: int): assert len(order_form_parser.samples) == nr_samples_mip_orderform # THEN assert that the project type is correct - assert order_form_parser.project_type == str(Pipeline.MIP_DNA) + assert order_form_parser.project_type == Workflow.MIP_DNA def test_parse_rml_orderform(rml_orderform: str, nr_samples_rml_orderform: int): @@ -207,7 +207,7 @@ def test_parse_fastq_orderform(fastq_orderform: str, nr_samples_fastq_orderform: assert len(order_form_parser.samples) == nr_samples_fastq_orderform # THEN it should determine the project type - assert order_form_parser.project_type == str(Pipeline.FASTQ) + assert order_form_parser.project_type == Workflow.FASTQ # THEN it should determine the correct customer should have been parsed assert order_form_parser.customer_id == "cust000" diff --git a/tests/cli/add/test_cli_add_family.py b/tests/cli/add/test_cli_add_family.py index 48e2ca1249..7fc3a8831c 100644 --- a/tests/cli/add/test_cli_add_family.py +++ b/tests/cli/add/test_cli_add_family.py @@ -3,13 +3,13 @@ from click.testing import CliRunner from cg.cli.add import add -from cg.constants import DataDelivery, Pipeline +from cg.constants import DataDelivery, Workflow from cg.models.cg_config import CGConfig from cg.store.models import Case, Customer, Panel from cg.store.store import Store from tests.store_helpers import StoreHelpers -CLI_OPTION_ANALYSIS = Pipeline.BALSAMIC_UMI +CLI_OPTION_ANALYSIS = Workflow.BALSAMIC_UMI CLI_OPTION_DELIVERY = DataDelivery.FASTQ_QC diff --git a/tests/cli/clean/conftest.py b/tests/cli/clean/conftest.py index 46ed85663e..f065402b58 100644 --- a/tests/cli/clean/conftest.py +++ b/tests/cli/clean/conftest.py @@ -6,7 +6,7 @@ import pytest from cg.apps.housekeeper.hk import HousekeeperAPI -from cg.constants import Pipeline +from cg.constants import Workflow from 
cg.meta.workflow.balsamic import BalsamicAnalysisAPI from cg.meta.workflow.microsalt import MicrosaltAnalysisAPI from cg.models.cg_config import CGConfig @@ -53,7 +53,7 @@ def clean_context( store=store, internal_id=balsamic_case_clean, name=balsamic_case_clean, - data_analysis=Pipeline.BALSAMIC, + data_analysis=Workflow.BALSAMIC, ) sample_case_to_clean = helpers.add_sample( store, application_type="wgs", is_tumour=True, internal_id=balsamic_case_clean @@ -63,7 +63,7 @@ def clean_context( helpers.add_analysis( store, case=case_to_clean, - pipeline=Pipeline.BALSAMIC, + pipeline=Workflow.BALSAMIC, started_at=timestamp_yesterday, uploaded_at=timestamp_yesterday, cleaned_at=None, @@ -75,7 +75,7 @@ def clean_context( store=store, internal_id=balsamic_case_not_clean, name=balsamic_case_not_clean, - data_analysis=Pipeline.BALSAMIC, + data_analysis=Workflow.BALSAMIC, ) case_to_not_clean.action = "running" store.session.commit() @@ -88,7 +88,7 @@ def clean_context( helpers.add_analysis( store, case=case_to_not_clean, - pipeline=Pipeline.BALSAMIC, + pipeline=Workflow.BALSAMIC, started_at=timestamp_yesterday, uploaded_at=timestamp_yesterday, cleaned_at=None, @@ -150,7 +150,7 @@ def clean_context_microsalt( store=store, internal_id=microsalt_case_clean, name=microsalt_case_clean, - data_analysis=Pipeline.MICROSALT, + data_analysis=Workflow.MICROSALT, ) sample_case_to_clean = helpers.add_sample(store, internal_id=microsalt_case_clean) @@ -159,7 +159,7 @@ def clean_context_microsalt( helpers.add_analysis( store, case=case_to_clean, - pipeline=Pipeline.MICROSALT, + pipeline=Workflow.MICROSALT, started_at=timestamp_yesterday, uploaded_at=timestamp_yesterday, cleaned_at=None, @@ -177,7 +177,7 @@ def clean_context_microsalt( store=store, internal_id=microsalt_case_clean_dry, name=microsalt_case_clean_dry, - data_analysis=Pipeline.MICROSALT, + data_analysis=Workflow.MICROSALT, ) sample_case_to_not_clean = helpers.add_sample(store, internal_id=microsalt_case_clean_dry) @@ -186,7 +186,7 @@ def clean_context_microsalt( helpers.add_analysis( store, case=case_to_clean_dry_run, - pipeline=Pipeline.MICROSALT, + pipeline=Workflow.MICROSALT, started_at=timestamp_yesterday, uploaded_at=timestamp_yesterday, cleaned_at=None, diff --git a/tests/cli/clean/test_balsamic_clean.py b/tests/cli/clean/test_balsamic_clean.py index 7ebbaaad8e..7b1fef0f4c 100644 --- a/tests/cli/clean/test_balsamic_clean.py +++ b/tests/cli/clean/test_balsamic_clean.py @@ -8,7 +8,7 @@ from cg.apps.tb import TrailblazerAPI from cg.cli.workflow.commands import clean_run_dir, past_run_dirs -from cg.constants import Pipeline +from cg.constants import Workflow from cg.models.cg_config import CGConfig from tests.store_helpers import StoreHelpers @@ -82,7 +82,7 @@ def test_dry_run( base_store = clean_context.status_db helpers.add_analysis( base_store, - pipeline=Pipeline.BALSAMIC, + pipeline=Workflow.BALSAMIC, started_at=timestamp_yesterday, uploaded_at=timestamp_yesterday, cleaned_at=None, @@ -101,7 +101,7 @@ def test_dry_run( assert result.exit_code == EXIT_SUCCESS assert "Would have deleted" in caplog.text assert balsamic_case_clean in caplog.text - assert analysis_to_clean in base_store.get_analyses_to_clean(pipeline=Pipeline.BALSAMIC) + assert analysis_to_clean in base_store.get_analyses_to_clean(pipeline=Workflow.BALSAMIC) def test_cleaned_at_valid( diff --git a/tests/cli/clean/test_hk_case_bundle_files.py b/tests/cli/clean/test_hk_case_bundle_files.py index 4644df7640..22c228e33b 100644 --- a/tests/cli/clean/test_hk_case_bundle_files.py +++ 
b/tests/cli/clean/test_hk_case_bundle_files.py @@ -4,7 +4,7 @@ from click.testing import CliRunner from cg.cli.clean import hk_case_bundle_files -from cg.constants.constants import Pipeline +from cg.constants.constants import Workflow from cg.constants.housekeeper_tags import WORKFLOW_PROTECTED_TAGS from cg.models.cg_config import CGConfig from cg.store.models import Analysis @@ -56,7 +56,7 @@ def test_clean_hk_case_files_too_old(cli_runner: CliRunner, clean_context: CGCon # THEN it should be successful assert result.exit_code == 0 # THEN it should report not having cleaned anything - assert f"Process freed 0.0 GB" in caplog.text + assert "Process freed 0.0 GB" in caplog.text def test_clean_hk_case_files_single_analysis( @@ -72,7 +72,7 @@ store: Store = context.status_db days_ago: int = 1 date_days_ago: dt.datetime = get_date_days_ago(days_ago) - pipeline: Pipeline = Pipeline.MIP_DNA + pipeline: Workflow = Workflow.MIP_DNA analysis: Analysis = helpers.add_analysis( store=store, started_at=date_days_ago, completed_at=date_days_ago, pipeline=pipeline ) @@ -111,7 +111,7 @@ def test_clean_hk_case_files_analysis_with_protected_tag( store: Store = context.status_db days_ago: int = 1 date_days_ago: dt.datetime = get_date_days_ago(days_ago) - pipeline: Pipeline = Pipeline.MIP_DNA + pipeline: Workflow = Workflow.MIP_DNA analysis: Analysis = helpers.add_analysis( store=store, started_at=date_days_ago, completed_at=date_days_ago, pipeline=pipeline diff --git a/tests/cli/clean/test_microbial_clean.py b/tests/cli/clean/test_microbial_clean.py index c7eee7e507..cc4007107c 100644 --- a/tests/cli/clean/test_microbial_clean.py +++ b/tests/cli/clean/test_microbial_clean.py @@ -6,7 +6,7 @@ from cg.apps.tb import TrailblazerAPI from cg.cli.workflow.commands import clean_run_dir -from cg.constants import Pipeline +from cg.constants import Workflow from cg.models.cg_config import CGConfig from tests.store_helpers import StoreHelpers @@ -29,7 +29,7 @@ def test_dry_run( base_store = clean_context_microsalt.status_db helpers.add_analysis( base_store, - pipeline=Pipeline.MICROSALT, + pipeline=Workflow.MICROSALT, started_at=timestamp_yesterday, uploaded_at=timestamp_yesterday, cleaned_at=None, @@ -58,7 +58,7 @@ def test_dry_run( # THEN the analysis should still be in the analyses_to_clean query since this is a dry-run assert analysis_to_clean.cleaned_at == None - assert analysis_to_clean in base_store.get_analyses_to_clean(pipeline=Pipeline.MICROSALT) + assert analysis_to_clean in base_store.get_analyses_to_clean(pipeline=Workflow.MICROSALT) def test_clean_run( @@ -77,7 +77,7 @@ def test_clean_run( base_store = clean_context_microsalt.status_db helpers.add_analysis( base_store, - pipeline=Pipeline.MICROSALT, + pipeline=Workflow.MICROSALT, started_at=timestamp_yesterday, uploaded_at=timestamp_yesterday, cleaned_at=None, @@ -106,4 +106,4 @@ # THEN the analysis should no longer be in the analyses_to_clean query assert isinstance(analysis_to_clean.cleaned_at, dt.datetime) - assert analysis_to_clean not in base_store.get_analyses_to_clean(pipeline=Pipeline.MICROSALT) + assert analysis_to_clean not in base_store.get_analyses_to_clean(pipeline=Workflow.MICROSALT) diff --git a/tests/cli/compress/test_cli_compress_fastq.py b/tests/cli/compress/test_cli_compress_fastq.py index 85a0c56f6a..865dbbbe65 100644 --- a/tests/cli/compress/test_cli_compress_fastq.py +++ b/tests/cli/compress/test_cli_compress_fastq.py @@ -6,7 +6,7 @@ from click.testing import CliRunner
from cg.cli.compress.fastq import fastq_cmd, get_cases_to_process -from cg.constants import Pipeline +from cg.constants import Workflow from cg.models.cg_config import CGConfig from cg.store.models import Case from cg.store.store import Store @@ -32,7 +32,7 @@ def test_get_cases_to_process( store=status_db, name=case_id, internal_id=case_id, - data_analysis=Pipeline.MIP_DNA, + data_analysis=Workflow.MIP_DNA, action=None, ) valid_compressable_case.created_at = dt.datetime.now() - dt.timedelta(days=1000) @@ -141,7 +141,7 @@ def test_compress_fastq_cli_case_id( store=status_db, name=case_id, internal_id=case_id, - data_analysis=Pipeline.MIP_DNA, + data_analysis=Workflow.MIP_DNA, action=None, ) valid_compressable_case.created_at = dt.datetime.now() - dt.timedelta(days=1000) diff --git a/tests/cli/generate/report/conftest.py b/tests/cli/generate/report/conftest.py index 599bc651bf..36a8d4b137 100644 --- a/tests/cli/generate/report/conftest.py +++ b/tests/cli/generate/report/conftest.py @@ -4,7 +4,7 @@ import pytest from cg.cli.generate.report.base import generate_delivery_report -from cg.constants import Pipeline +from cg.constants import Workflow from cg.models.cg_config import CGConfig from tests.mocks.report import MockMipDNAAnalysisAPI, MockMipDNAReportAPI @@ -27,7 +27,7 @@ def mip_dna_context(cg_context, helpers, case_id, real_housekeeper_api) -> CGCon ) case = helpers.add_case( store=store, - data_analysis=Pipeline.MIP_DNA, + data_analysis=Workflow.MIP_DNA, internal_id=case_id, ) sample = helpers.add_sample( @@ -36,7 +36,7 @@ def mip_dna_context(cg_context, helpers, case_id, real_housekeeper_api) -> CGCon helpers.add_analysis( store=store, case=case, - pipeline=Pipeline.MIP_DNA, + pipeline=Workflow.MIP_DNA, delivery_reported_at=datetime.now(), started_at=datetime.now(), ) diff --git a/tests/cli/generate/report/test_utils.py b/tests/cli/generate/report/test_utils.py index 2ebd6eb8d1..1717801fd4 100644 --- a/tests/cli/generate/report/test_utils.py +++ b/tests/cli/generate/report/test_utils.py @@ -11,7 +11,7 @@ get_report_api_pipeline, get_report_case, ) -from cg.constants import Pipeline +from cg.constants import Workflow from cg.meta.report.balsamic_umi import BalsamicUmiReportAPI from tests.mocks.report import MockMipDNAReportAPI @@ -58,7 +58,7 @@ def test_get_report_api_pipeline(delivery_report_click_context): """Tests API assignment given a specific pipeline""" # GIVEN a click context and a specific pipeline - pipeline = Pipeline.BALSAMIC_UMI + pipeline = Workflow.BALSAMIC_UMI # WHEN validating a report api report_api = get_report_api_pipeline(delivery_report_click_context, pipeline) diff --git a/tests/cli/set/test_cli_set_case.py b/tests/cli/set/test_cli_set_case.py index 287d1143a0..e482310b10 100644 --- a/tests/cli/set/test_cli_set_case.py +++ b/tests/cli/set/test_cli_set_case.py @@ -3,7 +3,7 @@ from click.testing import CliRunner from cg.cli.set.case import set_case -from cg.constants import EXIT_SUCCESS, DataDelivery, Pipeline +from cg.constants import EXIT_SUCCESS, DataDelivery, Workflow from cg.models.cg_config import CGConfig from cg.store.models import Case from cg.store.store import Store @@ -143,7 +143,7 @@ def test_set_case_data_analysis( """Test to set a case using an existing data_analysis.""" # GIVEN a database with a case and a data_analysis not yet set on the case - data_analysis: str = Pipeline.FASTQ + data_analysis: str = Workflow.FASTQ case_to_alter: str = helpers.add_case(base_store) assert str(data_analysis) != case_to_alter.data_analysis diff --git 
a/tests/cli/upload/conftest.py b/tests/cli/upload/conftest.py index 532ca5663c..b4e8b7e64d 100644 --- a/tests/cli/upload/conftest.py +++ b/tests/cli/upload/conftest.py @@ -11,7 +11,7 @@ from cg.apps.housekeeper.hk import HousekeeperAPI from cg.apps.scout.scoutapi import ScoutAPI from cg.apps.tb import TrailblazerAPI -from cg.constants.constants import FileFormat, Pipeline +from cg.constants.constants import FileFormat, Workflow from cg.constants.delivery import PIPELINE_ANALYSIS_TAG_MAP from cg.constants.housekeeper_tags import ( HK_DELIVERY_REPORT_TAG, @@ -197,8 +197,8 @@ def fastq_context( base_context.meta_apis["delivery_api"] = DeliverAPI( store=base_context.status_db, hk_api=base_context.housekeeper_api, - case_tags=PIPELINE_ANALYSIS_TAG_MAP[Pipeline.FASTQ]["case_tags"], - sample_tags=PIPELINE_ANALYSIS_TAG_MAP[Pipeline.FASTQ]["sample_tags"], + case_tags=PIPELINE_ANALYSIS_TAG_MAP[Workflow.FASTQ]["case_tags"], + sample_tags=PIPELINE_ANALYSIS_TAG_MAP[Workflow.FASTQ]["sample_tags"], delivery_type="fastq", project_base_path=Path(base_context.delivery_path), ) diff --git a/tests/cli/upload/test_cli_scout.py b/tests/cli/upload/test_cli_scout.py index 12872a1c19..d53eb8245e 100644 --- a/tests/cli/upload/test_cli_scout.py +++ b/tests/cli/upload/test_cli_scout.py @@ -4,7 +4,7 @@ from click.testing import CliRunner, Result from cg.cli.upload.scout import create_scout_load_config, get_upload_api -from cg.constants import EXIT_SUCCESS, Pipeline +from cg.constants import EXIT_SUCCESS, Workflow from cg.meta.upload.scout.uploadscoutapi import UploadScoutAPI from cg.meta.upload.upload_api import UploadAPI from cg.meta.workflow.balsamic import BalsamicAnalysisAPI @@ -21,9 +21,9 @@ def test_get_upload_api(cg_context: CGConfig, case_id: str, helpers: StoreHelper # GIVEN a case with a balsamic analysis case: Case = helpers.ensure_case( - store=status_db, data_analysis=Pipeline.BALSAMIC, case_id=case_id + store=status_db, data_analysis=Workflow.BALSAMIC, case_id=case_id ) - helpers.add_analysis(store=status_db, pipeline=Pipeline.BALSAMIC, case=case) + helpers.add_analysis(store=status_db, pipeline=Workflow.BALSAMIC, case=case) # WHEN getting the upload API upload_api: UploadAPI = get_upload_api(cg_config=cg_context, case=case) @@ -45,9 +45,9 @@ def test_create_scout_load_config( # GIVEN a case with a balsamic analysis case: Case = helpers.ensure_case( - store=status_db, data_analysis=Pipeline.BALSAMIC, case_id=case_id + store=status_db, data_analysis=Workflow.BALSAMIC, case_id=case_id ) - helpers.add_analysis(store=status_db, pipeline=Pipeline.BALSAMIC, case=case) + helpers.add_analysis(store=status_db, pipeline=Workflow.BALSAMIC, case=case) with mock.patch.object(UploadScoutAPI, "generate_config", return_value=MockScoutLoadConfig()): # WHEN creating the scout load config diff --git a/tests/cli/upload/test_cli_upload_auto.py b/tests/cli/upload/test_cli_upload_auto.py index 675833934d..a30e8cd275 100644 --- a/tests/cli/upload/test_cli_upload_auto.py +++ b/tests/cli/upload/test_cli_upload_auto.py @@ -6,7 +6,7 @@ from click.testing import CliRunner from cg.cli.upload.base import upload_all_completed_analyses -from cg.constants import Pipeline +from cg.constants import Workflow from cg.models.cg_config import CGConfig from tests.store_helpers import StoreHelpers @@ -20,7 +20,7 @@ def test_upload_auto_with_pipeline_as_argument( ): """Test upload auto""" # GIVEN a store with a MIP analysis - pipeline = Pipeline.MIP_DNA + pipeline = Workflow.MIP_DNA helpers.add_analysis(store=upload_context.status_db, 
 
     # WHEN uploading all analysis from pipeline MIP
diff --git a/tests/cli/upload/test_cli_upload_fastq.py b/tests/cli/upload/test_cli_upload_fastq.py
index 0bcd5f783f..7940157738 100644
--- a/tests/cli/upload/test_cli_upload_fastq.py
+++ b/tests/cli/upload/test_cli_upload_fastq.py
@@ -2,7 +2,7 @@
 from cg.cli.upload.clinical_delivery import auto_fastq
 from cg.constants import DataDelivery
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.store.models import Analysis
@@ -12,7 +12,7 @@ def test_auto_fastq_not_started(
     """Tests if the command finds a non-uploaded analysis and attempts to start it"""
     caplog.set_level(logging.INFO)
     # GIVEN a case to be delivered
-    analysis_obj.pipeline = Pipeline.FASTQ
+    analysis_obj.pipeline = Workflow.FASTQ
     analysis_obj.case.data_delivery = DataDelivery.FASTQ
     base_context.status_db.session.commit()
     base_context.status_db.session.close()
diff --git a/tests/cli/upload/test_cli_upload_nipt.py b/tests/cli/upload/test_cli_upload_nipt.py
index f7791d00c5..77404560bf 100644
--- a/tests/cli/upload/test_cli_upload_nipt.py
+++ b/tests/cli/upload/test_cli_upload_nipt.py
@@ -7,7 +7,7 @@
 from cg.apps.tb.api import TrailblazerAPI
 from cg.cli.upload.nipt.base import nipt_upload_all, nipt_upload_case
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.meta.upload.nipt import NiptUploadAPI
 from cg.models.cg_config import CGConfig
 from cg.store.models import Analysis
@@ -113,7 +113,7 @@ def test_nipt_statina_upload_auto(
     analysis_obj: Analysis = helpers.add_analysis(
         store=upload_context.status_db,
         completed_at=datetime.datetime.now(),
-        pipeline=Pipeline.FLUFFY,
+        pipeline=Workflow.FLUFFY,
     )
     assert analysis_obj.completed_at
     assert not analysis_obj.uploaded_at
@@ -173,7 +173,7 @@ def test_nipt_statina_upload_auto_analysis_without_case(
     analysis_obj: Analysis = helpers.add_analysis(
         store=upload_context.status_db,
         completed_at=datetime.datetime.now(),
-        pipeline=Pipeline.FLUFFY,
+        pipeline=Workflow.FLUFFY,
     )
     analysis_obj.case = None
     mocker.patch.object(NiptUploadAPI, "get_all_upload_analyses", return_value=[analysis_obj])
@@ -195,7 +195,7 @@ def test_nipt_statina_upload_auto_dry_run(
     analysis_obj: Analysis = helpers.add_analysis(
         store=upload_context.status_db,
         completed_at=datetime.datetime.now(),
-        pipeline=Pipeline.FLUFFY,
+        pipeline=Workflow.FLUFFY,
     )
     assert analysis_obj.completed_at
     assert not analysis_obj.uploaded_at
diff --git a/tests/cli/upload/test_cli_upload_observations.py b/tests/cli/upload/test_cli_upload_observations.py
index 98188a6f40..e59c2fecff 100644
--- a/tests/cli/upload/test_cli_upload_observations.py
+++ b/tests/cli/upload/test_cli_upload_observations.py
@@ -14,7 +14,7 @@
     get_sequencing_method,
 )
 from cg.constants import EXIT_SUCCESS
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.constants.sequencing import SequencingMethod
 from cg.constants.subject import PhenotypeStatus
 from cg.exc import CaseNotFoundError, LoqusdbUploadCaseError
@@ -95,7 +95,7 @@ def test_get_observations_api(base_context: CGConfig, helpers: StoreHelpers):
     store: Store = base_context.status_db
 
     # GIVEN a Loqusdb supported case
-    case: Case = helpers.add_case(store, data_analysis=Pipeline.MIP_DNA)
+    case: Case = helpers.add_case(store, data_analysis=Workflow.MIP_DNA)
     sample: Sample = helpers.add_sample(store, application_type=SequencingMethod.WES)
     link = store.relate_sample(case=case, sample=sample, status=PhenotypeStatus.UNKNOWN)
     store.session.add(link)
diff --git a/tests/cli/workflow/balsamic/conftest.py b/tests/cli/workflow/balsamic/conftest.py
index d2409b4262..c832891c84 100644
--- a/tests/cli/workflow/balsamic/conftest.py
+++ b/tests/cli/workflow/balsamic/conftest.py
@@ -7,7 +7,7 @@
 import pytest
 
 from cg.apps.housekeeper.hk import HousekeeperAPI
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.constants import CaseActions, FileFormat, PrepCategory
 from cg.io.controller import WriteFile
 from cg.meta.workflow.balsamic import BalsamicAnalysisAPI
@@ -322,7 +322,7 @@ def balsamic_context(
         store=status_db,
         internal_id="balsamic_case_wgs_paired_enough_reads",
         name="balsamic_case_wgs_paired_enough_reads",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
         action=CaseActions.HOLD,
     )
     sample_case_wgs_paired_tumor_enough_reads = helpers.add_sample(
@@ -357,7 +357,7 @@ def balsamic_context(
         store=status_db,
         internal_id="balsamic_case_wgs_paired",
         name="balsamic_case_wgs_paired",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
         action=CaseActions.HOLD,
     )
     sample_case_wgs_paired_tumor = helpers.add_sample(
@@ -384,7 +384,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_tgs_paired",
         name="balsamic_case_tgs_paired",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_tgs_paired_tumor = helpers.add_sample(
         status_db,
@@ -412,7 +412,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_wgs_single",
         name="balsamic_case_wgs_single",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_wgs_single_tumor = helpers.add_sample(
         status_db,
@@ -429,7 +429,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_tgs_single",
         name="balsamic_case_tgs_single",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_tgs_single_tumor = helpers.add_sample(
         status_db,
@@ -446,7 +446,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_tgs_single_error",
         name="balsamic_case_tgs_single_error",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_tgs_single_normal_error = helpers.add_sample(
         status_db,
@@ -467,7 +467,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_tgs_paired_error",
         name="balsamic_case_tgs_paired_error",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_tgs_paired_tumor_error = helpers.add_sample(
         status_db,
@@ -514,7 +514,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_mixed_paired_error",
         name="balsamic_case_mixed_paired_error",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     mixed_sample_case_wgs_paired_tumor_error = helpers.add_sample(
         status_db,
@@ -547,7 +547,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_mixed_wgs_mic_paired_error",
         name="balsamic_case_mixed_wgs_mic_paired_error",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     mixed_sample_case_wgs_mic_paired_tumor_error = helpers.add_sample(
         status_db,
@@ -580,7 +580,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_mixed_bed_paired_error",
         name="balsamic_case_mixed_bed_paired_error",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     mixed_sample_case_mixed_bed_paired_tumor_error = helpers.add_sample(
         status_db,
@@ -614,7 +614,7 @@ def balsamic_context(
         status_db,
         internal_id="mip_case_wgs_single",
         name="mip_case_wgs_single",
-        data_analysis=Pipeline.MIP_DNA,
+        data_analysis=Workflow.MIP_DNA,
     )
     mip_sample_case_wgs_single_tumor = helpers.add_sample(
         status_db,
@@ -634,7 +634,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_wgs_paired_two_normal_error",
         name="balsamic_case_wgs_paired_two_normal_error",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_wgs_paired_two_normal_tumor_error = helpers.add_sample(
         status_db,
@@ -681,7 +681,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_wes_tumor",
         name="balsamic_case_wes_tumor",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_wes_tumor = helpers.add_sample(
         status_db,
@@ -698,7 +698,7 @@ def balsamic_context(
         status_db,
         internal_id="balsamic_case_wes_panel_error",
         name="balsamic_case_wes_panel_error",
-        data_analysis=Pipeline.BALSAMIC,
+        data_analysis=Workflow.BALSAMIC,
     )
     sample_case_wes_panel_error = helpers.add_sample(
         status_db,
diff --git a/tests/cli/workflow/conftest.py b/tests/cli/workflow/conftest.py
index 5c8dcee883..4451724ec2 100644
--- a/tests/cli/workflow/conftest.py
+++ b/tests/cli/workflow/conftest.py
@@ -6,7 +6,7 @@
 
 import pytest
 
-from cg.constants import DataDelivery, FlowCellStatus, Pipeline
+from cg.constants import DataDelivery, FlowCellStatus, Workflow
 from cg.models.cg_config import CGConfig
 from cg.store.crud.read import ReadHandler
 from cg.store.models import Case
@@ -32,20 +32,20 @@ def analysis_store(base_store: Store, workflow_case_id: str, helpers: StoreHelpe
    """Store to be used in tests"""
     _store = base_store
 
-    case = helpers.add_case(_store, workflow_case_id, data_analysis=Pipeline.MIP_DNA)
+    case = helpers.add_case(_store, workflow_case_id, data_analysis=Workflow.MIP_DNA)
     dna_sample = helpers.add_sample(
         _store, "dna_sample", is_rna=False, reads=10000000, last_sequenced_at=datetime.now()
     )
     helpers.add_relationship(_store, sample=dna_sample, case=case)
 
-    case = helpers.add_case(_store, "rna_case", data_analysis=Pipeline.MIP_RNA)
+    case = helpers.add_case(_store, "rna_case", data_analysis=Workflow.MIP_RNA)
     rna_sample = helpers.add_sample(
         _store, "rna_sample", is_rna=True, reads=10000000, last_sequenced_at=datetime.now()
     )
     helpers.add_relationship(_store, sample=rna_sample, case=case)
 
-    case = helpers.add_case(_store, "dna_rna_mix_case", data_analysis=Pipeline.MIP_DNA)
+    case = helpers.add_case(_store, "dna_rna_mix_case", data_analysis=Workflow.MIP_DNA)
     helpers.add_relationship(_store, sample=rna_sample, case=case)
     helpers.add_relationship(_store, sample=dna_sample, case=case)
@@ -76,7 +76,7 @@ def fastq_case(case_id, family_name, sample_id, cust_sample_id, ticket_id: str)
         "name": family_name,
         "panels": None,
         "internal_id": case_id,
-        "data_analysis": Pipeline.FASTQ,
+        "data_analysis": Workflow.FASTQ,
         "data_delivery": DataDelivery.FASTQ,
         "completed_at": None,
         "action": None,
diff --git a/tests/cli/workflow/fastq/test_fastq_base.py b/tests/cli/workflow/fastq/test_fastq_base.py
index 03757ea806..060ca7a58c 100644
--- a/tests/cli/workflow/fastq/test_fastq_base.py
+++ b/tests/cli/workflow/fastq/test_fastq_base.py
@@ -5,7 +5,7 @@
     store_available_fastq_analysis,
     store_fastq_analysis,
 )
-from cg.constants.constants import CaseActions, Pipeline
+from cg.constants.constants import CaseActions, Workflow
 from cg.store.models import Analysis, Case, Sample
@@ -45,7 +45,7 @@ def test_store_available_fastq_analysis(
         fastq_context.status_db, case_id=another_case_id, sample_id="sample_for_another_case_id"
     )
     assert not case_obj.analyses
-    case_obj.data_analysis = Pipeline.FASTQ
+    case_obj.data_analysis = Workflow.FASTQ
     case_obj.action = CaseActions.ANALYZE
     case_obj.samples[0].last_sequenced_at = datetime.now()
diff --git a/tests/cli/workflow/fluffy/conftest.py b/tests/cli/workflow/fluffy/conftest.py
index bb935aecad..9416381f75 100644
--- a/tests/cli/workflow/fluffy/conftest.py
+++ b/tests/cli/workflow/fluffy/conftest.py
@@ -5,7 +5,7 @@
 from cg.apps.housekeeper.hk import HousekeeperAPI
 from cg.apps.housekeeper.models import InputBundle
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.meta.workflow.fluffy import FluffyAnalysisAPI
 from cg.models.cg_config import CGConfig
 from cg.store.models import Sample
@@ -160,7 +160,7 @@ def fluffy_context(
         fluffy_analysis_api.status_db,
         internal_id=fluffy_case_id_existing,
         name=fluffy_case_id_existing,
-        data_analysis=Pipeline.FLUFFY,
+        data_analysis=Workflow.FLUFFY,
     )
     example_fluffy_sample = helpers.add_sample(
         fluffy_analysis_api.status_db,
diff --git a/tests/cli/workflow/mip/conftest.py b/tests/cli/workflow/mip/conftest.py
index 21bead7793..a6479f88e4 100644
--- a/tests/cli/workflow/mip/conftest.py
+++ b/tests/cli/workflow/mip/conftest.py
@@ -6,7 +6,7 @@
 from cg.apps.housekeeper.models import InputBundle
 from cg.apps.scout.scoutapi import ScoutAPI
 from cg.apps.tb import TrailblazerAPI
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.subject import Sex
 from cg.meta.compress import CompressAPI
 from cg.meta.workflow.mip_dna import MipDNAAnalysisAPI
@@ -107,7 +107,7 @@ def mip_rna_context(
 ) -> CGConfig:
     cg_context.housekeeper_api_ = housekeeper_api
     cg_context.trailblazer_api_ = tb_api
-    analysis_family_single_case["data_analysis"] = str(Pipeline.MIP_RNA)
+    analysis_family_single_case["data_analysis"] = Workflow.MIP_RNA
     if not cg_context.status_db.get_case_by_internal_id(internal_id=case_id):
         helpers.ensure_case_from_dict(
             cg_context.status_db, case_info=analysis_family_single_case, app_tag=apptag_rna
@@ -138,7 +138,7 @@ def mip_dna_context(
     if not _store.get_case_by_internal_id(internal_id=case_id):
         case_obj = helpers.add_case(
             store=_store,
-            data_analysis=Pipeline.MIP_DNA,
+            data_analysis=Workflow.MIP_DNA,
             internal_id=case_id,
             name=mip_case_ids[case_id]["name"],
         )
diff --git a/tests/cli/workflow/mip/test_cli_mip_dna_start.py b/tests/cli/workflow/mip/test_cli_mip_dna_start.py
index 8c6b0cf342..e8231f5c27 100644
--- a/tests/cli/workflow/mip/test_cli_mip_dna_start.py
+++ b/tests/cli/workflow/mip/test_cli_mip_dna_start.py
@@ -3,7 +3,7 @@
 import logging
 
 from cg.cli.workflow.mip_dna.base import start_available
-from cg.constants import EXIT_SUCCESS, Pipeline
+from cg.constants import EXIT_SUCCESS, Workflow
 from cg.meta.workflow.prepare_fastq import PrepareFastqAPI
@@ -63,7 +63,7 @@ def test_rna_case_excluded(cli_runner, caplog, mip_dna_context, rna_case, mocker
     # GIVEN a case that is ready for MIP RNA analysis
     # -> has a sample that is sequenced and has an rna-application (wts)
-    assert rna_case.data_analysis == str(Pipeline.MIP_RNA)
+    assert rna_case.data_analysis == Workflow.MIP_RNA
     for link in rna_case.links:
         sample = link.sample
         assert sample.last_sequenced_at
diff --git a/tests/cli/workflow/rnafusion/test_cli_rnafusion_run.py b/tests/cli/workflow/rnafusion/test_cli_rnafusion_run.py
index 295842ef76..9fb9b491d8 100644
--- a/tests/cli/workflow/rnafusion/test_cli_rnafusion_run.py
+++ b/tests/cli/workflow/rnafusion/test_cli_rnafusion_run.py
@@ -201,7 +201,7 @@ def test_resume_with_id(
     assert result.exit_code == EXIT_SUCCESS
 
     # THEN command should use tower for relaunch
-    assert "Pipeline will be resumed from run" in caplog.text
+    assert "Workflow will be resumed from run" in caplog.text
     assert "tw runs relaunch" in caplog.text
@@ -228,7 +228,7 @@ def test_resume_without_id(
     assert result.exit_code == EXIT_SUCCESS
 
     # THEN command should use tower for relaunch
-    assert "Pipeline will be resumed from run" in caplog.text
+    assert "Workflow will be resumed from run" in caplog.text
     assert "tw runs relaunch" in caplog.text
diff --git a/tests/cli/workflow/taxprofiler/test_cli_taxprofiler_run.py b/tests/cli/workflow/taxprofiler/test_cli_taxprofiler_run.py
index 6e5e997c9a..d9b4dda972 100644
--- a/tests/cli/workflow/taxprofiler/test_cli_taxprofiler_run.py
+++ b/tests/cli/workflow/taxprofiler/test_cli_taxprofiler_run.py
@@ -2,7 +2,6 @@
 import logging
 
-import pytest
 from _pytest.logging import LogCaptureFixture
 from click.testing import CliRunner
@@ -126,5 +125,5 @@ def test_with_config_use_tower_resume(
     assert result.exit_code == EXIT_SUCCESS
 
     # THEN command should use tower for relaunch
-    assert "Pipeline will be resumed from run" in caplog.text
+    assert "Workflow will be resumed from run" in caplog.text
     assert "path/to/bin/tw runs relaunch" in caplog.text
diff --git a/tests/conftest.py b/tests/conftest.py
index be4758b4a6..56c7f0b692 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -24,7 +24,7 @@
 from cg.apps.housekeeper.hk import HousekeeperAPI
 from cg.apps.lims import LimsAPI
 from cg.apps.slurm.slurm_api import SlurmAPI
-from cg.constants import FileExtensions, Pipeline, SequencingFileTag
+from cg.constants import FileExtensions, SequencingFileTag, Workflow
 from cg.constants.constants import CaseActions, FileFormat, Strandedness
 from cg.constants.demultiplexing import DemultiplexingDirsAndFiles
 from cg.constants.housekeeper_tags import HK_DELIVERY_REPORT_TAG
@@ -33,7 +33,7 @@
 from cg.constants.subject import Sex
 from cg.io.controller import WriteFile
 from cg.io.json import read_json, write_json
-from cg.io.yaml import write_yaml, read_yaml
+from cg.io.yaml import read_yaml, write_yaml
 from cg.meta.encryption.encryption import FlowCellEncryptionAPI
 from cg.meta.rsync import RsyncAPI
 from cg.meta.tar.tar import TarAPI
@@ -204,7 +204,7 @@ def analysis_family_single_case(
     return {
         "name": family_name,
         "internal_id": case_id,
-        "data_analysis": str(Pipeline.MIP_DNA),
+        "data_analysis": Workflow.MIP_DNA,
         "application_type": "wgs",
         "panels": ["IEM", "EP"],
         "tickets": ticket_id,
@@ -228,7 +228,7 @@ def analysis_family(case_id: str, family_name: str, sample_id: str, ticket_id: s
     return {
         "name": family_name,
         "internal_id": case_id,
-        "data_analysis": str(Pipeline.MIP_DNA),
+        "data_analysis": Workflow.MIP_DNA,
         "application_type": "wgs",
         "tickets": ticket_id,
         "panels": ["IEM", "EP"],
@@ -2072,13 +2072,13 @@ def store_with_cases_and_customers(
         )
         customers.append(customer)
 
-    case_details: list[tuple[str, str, Pipeline, CaseActions, Customer]] = [
-        ("case 1", "flyingwhale", Pipeline.BALSAMIC, CaseActions.RUNNING, customers[0]),
-        ("case 2", "swimmingtiger", Pipeline.FLUFFY, CaseActions.ANALYZE, customers[0]),
-        ("case 3", "sadbaboon", Pipeline.MUTANT, CaseActions.HOLD, customers[1]),
-        ("case 4", "funkysloth", Pipeline.MIP_DNA, CaseActions.ANALYZE, customers[1]),
-        ("case 5", "deadparrot", Pipeline.MICROSALT, CaseActions.RUNNING, customers[2]),
-        ("case 6", "anxiousbeetle", Pipeline.DEMULTIPLEX, CaseActions.RUNNING, customers[2]),
+    case_details: list[tuple[str, str, Workflow, CaseActions, Customer]] = [
+        ("case 1", "flyingwhale", Workflow.BALSAMIC, CaseActions.RUNNING, customers[0]),
+        ("case 2", "swimmingtiger", Workflow.FLUFFY, CaseActions.ANALYZE, customers[0]),
+        ("case 3", "sadbaboon", Workflow.MUTANT, CaseActions.HOLD, customers[1]),
+        ("case 4", "funkysloth", Workflow.MIP_DNA, CaseActions.ANALYZE, customers[1]),
+        ("case 5", "deadparrot", Workflow.MICROSALT, CaseActions.RUNNING, customers[2]),
+        ("case 6", "anxiousbeetle", Workflow.DEMULTIPLEX, CaseActions.RUNNING, customers[2]),
     ]
 
     for case_name, case_id, pipeline, action, customer in case_details:
@@ -2325,7 +2325,7 @@ def rnafusion_context(
         store=status_db,
         internal_id=rnafusion_case_id,
         name=rnafusion_case_id,
-        data_analysis=Pipeline.RNAFUSION,
+        data_analysis=Workflow.RNAFUSION,
     )
 
     sample_rnafusion_case_enough_reads: Sample = helpers.add_sample(
@@ -2347,7 +2347,7 @@ def rnafusion_context(
         store=status_db,
         internal_id=case_id_not_enough_reads,
         name=case_id_not_enough_reads,
-        data_analysis=Pipeline.RNAFUSION,
+        data_analysis=Workflow.RNAFUSION,
     )
 
     sample_not_enough_reads: Sample = helpers.add_sample(
@@ -2601,7 +2601,7 @@ def taxprofiler_context(
         store=status_db,
         internal_id=taxprofiler_case_id,
         name=taxprofiler_case_id,
-        data_analysis=Pipeline.TAXPROFILER,
+        data_analysis=Workflow.TAXPROFILER,
     )
 
     taxprofiler_sample: Sample = helpers.add_sample(
@@ -3019,7 +3019,7 @@ def raredisease_context(
         store=status_db,
         internal_id=raredisease_case_id,
         name=raredisease_case_id,
-        data_analysis=Pipeline.RAREDISEASE,
+        data_analysis=Workflow.RAREDISEASE,
    )
 
     sample_raredisease_case_enough_reads: Sample = helpers.add_sample(
@@ -3041,7 +3041,7 @@ def raredisease_context(
         store=status_db,
         internal_id=case_id_not_enough_reads,
         name=case_id_not_enough_reads,
-        data_analysis=Pipeline.RAREDISEASE,
+        data_analysis=Workflow.RAREDISEASE,
     )
 
     sample_not_enough_reads: Sample = helpers.add_sample(
diff --git a/tests/meta/archive/conftest.py b/tests/meta/archive/conftest.py
index e84d9b5318..347486943e 100644
--- a/tests/meta/archive/conftest.py
+++ b/tests/meta/archive/conftest.py
@@ -12,7 +12,7 @@
 from cg.apps.housekeeper.hk import HousekeeperAPI
 from cg.constants import SequencingFileTag
 from cg.constants.archiving import ArchiveLocations
-from cg.constants.constants import DataDelivery, FileFormat, Pipeline
+from cg.constants.constants import DataDelivery, FileFormat, Workflow
 from cg.constants.subject import Sex
 from cg.io.controller import WriteStream
 from cg.meta.archive.archive import SpringArchiveAPI
@@ -273,7 +273,7 @@ def archive_store(
     base_store.session.add_all(new_samples)
     base_store.session.commit()
     case: Case = base_store.add_case(
-        data_analysis=Pipeline.MIP_DNA,
+        data_analysis=Workflow.MIP_DNA,
         data_delivery=DataDelivery.NO_DELIVERY,
         name="dummy_name",
         ticket="123",
diff --git a/tests/meta/deliver/test_deliver_ticket.py b/tests/meta/deliver/test_deliver_ticket.py
index c479ce8efa..6a322a3403 100644
--- a/tests/meta/deliver/test_deliver_ticket.py
+++ b/tests/meta/deliver/test_deliver_ticket.py
@@ -3,7 +3,7 @@
 import logging
 from pathlib import Path
 
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.constants.delivery import INBOX_NAME
 from cg.meta.deliver_ticket import DeliverTicketAPI
 from cg.models.cg_config import CGConfig
@@ -23,7 +23,7 @@ def test_get_inbox_path(
         store=cg_context.status_db,
         internal_id="angrybird",
         name=ticket_id,
-        data_analysis=Pipeline.MUTANT,
+        data_analysis=Workflow.MUTANT,
     )
     mocker.patch.object(DeliverTicketAPI, "get_all_cases_from_ticket")
@@ -80,7 +80,7 @@ def test_generate_date_tag(cg_context: CGConfig, mocker, helpers, ticket_id: str
         store=cg_context.status_db,
         internal_id="angrybird",
         name=ticket_id,
-        data_analysis=Pipeline.MUTANT,
+        data_analysis=Workflow.MUTANT,
     )
     case.ordered_at = timestamp_now
diff --git a/tests/meta/deliver/test_delivery_api.py b/tests/meta/deliver/test_delivery_api.py
index 81b8fd501b..2d1d07de62 100644
--- a/tests/meta/deliver/test_delivery_api.py
+++ b/tests/meta/deliver/test_delivery_api.py
@@ -5,7 +5,7 @@
 from housekeeper.store.models import Version
 
 from cg.apps.housekeeper.hk import HousekeeperAPI
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.constants.delivery import INBOX_NAME
 from cg.constants.housekeeper_tags import AlignmentFileTag
 from cg.meta.deliver import DeliverAPI
@@ -155,7 +155,7 @@ def test_get_sample_files_from_version(
 def test_get_delivery_scope_case_only():
     """Testing the delivery scope of a case only delivery."""
     # GIVEN a case only delivery type
-    delivery_type: set[str] = {Pipeline.MIP_DNA}
+    delivery_type: set[str] = {Workflow.MIP_DNA}
 
     # WHEN getting the delivery scope
     sample_delivery, case_delivery = DeliverAPI.get_delivery_scope(delivery_type)
@@ -168,7 +168,7 @@ def test_get_delivery_scope_sample_only():
     """Testing the delivery scope of a sample only delivery."""
     # GIVEN a sample only delivery type
-    delivery_type = {Pipeline.FASTQ}
+    delivery_type = {Workflow.FASTQ}
 
     # WHEN getting the delivery scope
     sample_delivery, case_delivery = DeliverAPI.get_delivery_scope(delivery_type)
@@ -181,7 +181,7 @@ def test_get_delivery_scope_case_and_sample():
     """Testing the delivery scope of a case and sample delivery."""
     # GIVEN a case and sample delivery type
-    delivery_type = {Pipeline.MUTANT}
+    delivery_type = {Workflow.MUTANT}
 
     # WHEN getting the delivery scope
     sample_delivery, case_delivery = DeliverAPI.get_delivery_scope(delivery_type)
diff --git a/tests/meta/orders/test_PoolSubmitter_validate_order.py b/tests/meta/orders/test_PoolSubmitter_validate_order.py
index 4fc3fc35aa..3af6fc953b 100644
--- a/tests/meta/orders/test_PoolSubmitter_validate_order.py
+++ b/tests/meta/orders/test_PoolSubmitter_validate_order.py
@@ -1,7 +1,7 @@
 import pytest
 
 from cg.constants import DataDelivery
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.exc import OrderError
 from cg.meta.orders.pool_submitter import PoolSubmitter
 from cg.models.orders.constants import OrderType
@@ -32,7 +32,7 @@ def test_validate_case_name(rml_order_to_submit: dict, base_store: Store, helper
         store=base_store,
         case_name=PoolSubmitter.create_case_name(ticket=order.ticket, pool_name=sample.pool),
         customer=customer,
-        data_analysis=Pipeline.FLUFFY,
+        data_analysis=Workflow.FLUFFY,
         data_delivery=DataDelivery.STATINA,
     )
     base_store.session.add(case)
diff --git a/tests/meta/orders/test_SarsCov2Submitter_store_order.py b/tests/meta/orders/test_SarsCov2Submitter_store_order.py
index 02af73071c..6905c29097 100644
--- a/tests/meta/orders/test_SarsCov2Submitter_store_order.py
+++ b/tests/meta/orders/test_SarsCov2Submitter_store_order.py
@@ -1,7 +1,7 @@
 import datetime as dt
 
 from cg.constants import DataDelivery
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.meta.orders.sars_cov_2_submitter import SarsCov2Submitter
 from cg.models.orders.constants import OrderType
 from cg.models.orders.order import OrderIn
@@ -27,7 +27,7 @@ def test_store_items_in_status_control_has_stored_value(
     submitter.store_items_in_status(
         comment="",
         customer_id=order.customer,
-        data_analysis=Pipeline.MUTANT,
+        data_analysis=Workflow.MUTANT,
         data_delivery=DataDelivery.FASTQ,
         order="",
         ordered=dt.datetime.now(),
diff --git a/tests/meta/orders/test_meta_orders_api.py b/tests/meta/orders/test_meta_orders_api.py
index 28f3a622a2..0e22eb3525 100644
--- a/tests/meta/orders/test_meta_orders_api.py
+++ b/tests/meta/orders/test_meta_orders_api.py
@@ -4,7 +4,7 @@
 import pytest
 
 from cg.constants import DataDelivery
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.constants.subject import Sex
 from cg.exc import OrderError, TicketCreationError
 from cg.meta.orders import OrdersAPI
@@ -245,7 +245,7 @@ def test_submit_duplicate_sample_case_name(
         case_id = sample.family_name
         if not store.get_case_by_name_and_customer(customer=customer, case_name=case_id):
             case: Case = store.add_case(
-                data_analysis=Pipeline.MIP_DNA,
+                data_analysis=Workflow.MIP_DNA,
                 data_delivery=DataDelivery.SCOUT,
                 name=case_id,
                 ticket=ticket_id,
diff --git a/tests/meta/orders/test_meta_orders_lims.py b/tests/meta/orders/test_meta_orders_lims.py
index ac07a74878..6c2a5ad61d 100644
--- a/tests/meta/orders/test_meta_orders_lims.py
+++ b/tests/meta/orders/test_meta_orders_lims.py
@@ -1,6 +1,6 @@
 import pytest
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.meta.orders.lims import build_lims_sample
 from cg.models.lims.sample import LimsSample
 from cg.models.orders.constants import OrderType
@@ -141,9 +141,9 @@ def test_to_lims_balsamic(balsamic_order_to_submit, project):
     assert first_sample["name"] == "s1"
     assert {sample.container for sample in samples} == set(["96 well plate"])
     assert first_sample["udfs"]["data_analysis"] in [
-        str(Pipeline.BALSAMIC),
-        str(Pipeline.BALSAMIC_QC),
-        str(Pipeline.BALSAMIC_UMI),
+        Workflow.BALSAMIC,
+        Workflow.BALSAMIC_QC,
+        Workflow.BALSAMIC_UMI,
     ]
     assert first_sample["udfs"]["application"] == "WGSPCFC030"
     assert first_sample["udfs"]["sex"] == "M"
diff --git a/tests/meta/orders/test_meta_orders_status.py b/tests/meta/orders/test_meta_orders_status.py
index 507f63d8ef..592805570b 100644
--- a/tests/meta/orders/test_meta_orders_status.py
+++ b/tests/meta/orders/test_meta_orders_status.py
@@ -3,7 +3,7 @@
 import pytest
 
-from cg.constants import DataDelivery, Pipeline, Priority
+from cg.constants import DataDelivery, Priority, Workflow
 from cg.constants.constants import CaseActions, PrepCategory
 from cg.exc import OrderError
 from cg.meta.orders import OrdersAPI
@@ -40,7 +40,7 @@ def test_pools_to_status(rml_order_to_submit):
     pool = data["pools"][0]
     assert pool["name"] == "pool-1"
     assert pool["application"] == "RMLP05R800"
-    assert pool["data_analysis"] == str(Pipeline.FASTQ)
+    assert pool["data_analysis"] == Workflow.FASTQ
     assert pool["data_delivery"] == str(DataDelivery.FASTQ)
     assert len(pool["samples"]) == 2
     sample = pool["samples"][0]
@@ -98,7 +98,7 @@ def test_microbial_samples_to_status(microbial_order_to_submit):
     assert data["customer"] == "cust002"
     assert data["order"] == "Microbial samples"
     assert data["comment"] == "Order comment"
-    assert data["data_analysis"] == str(Pipeline.MICROSALT)
+    assert data["data_analysis"] == Workflow.MICROSALT
     assert data["data_delivery"] == str(DataDelivery.FASTQ)
 
     # THEN first sample should contain all the relevant data from the microbial order
@@ -125,7 +125,7 @@ def test_sarscov2_samples_to_status(sarscov2_order_to_submit):
     assert data["customer"] == "cust002"
data["customer"] == "cust002" assert data["order"] == "Sars-CoV-2 samples" assert data["comment"] == "Order comment" - assert data["data_analysis"] == str(Pipeline.MUTANT) + assert data["data_analysis"] == Workflow.MUTANT assert data["data_delivery"] == str(DataDelivery.FASTQ) # THEN first sample should contain all the relevant data from the microbial order @@ -152,7 +152,7 @@ def test_cases_to_status(mip_order_to_submit): assert len(data["families"]) == 2 family = data["families"][0] assert family["name"] == "family1" - assert family["data_analysis"] == str(Pipeline.MIP_DNA) + assert family["data_analysis"] == Workflow.MIP_DNA assert family["data_delivery"] == str(DataDelivery.SCOUT) assert family["priority"] == Priority.standard.name assert family["cohorts"] == ["Other"] @@ -236,7 +236,7 @@ def test_store_rml(orders_api, base_store, rml_status_data, ticket_id: str): assert new_pool.deliveries[0].destination == "caesar" new_case = base_store.get_cases()[0] - assert new_case.data_analysis == str(Pipeline.FASTQ) + assert new_case.data_analysis == Workflow.FASTQ assert new_case.data_delivery == str(DataDelivery.FASTQ) # and that the pool is set for invoicing but not the samples of the pool @@ -316,7 +316,7 @@ def test_store_fastq_samples_non_tumour_wgs_to_mip(orders_api, base_store, fastq ) # THEN the analysis for the case should be MAF - assert new_samples[0].links[0].case.data_analysis == Pipeline.MIP_DNA + assert new_samples[0].links[0].case.data_analysis == Workflow.MIP_DNA def test_store_fastq_samples_tumour_wgs_to_fastq( @@ -342,7 +342,7 @@ def test_store_fastq_samples_tumour_wgs_to_fastq( ) # THEN the analysis for the case should be FASTQ - assert new_samples[0].links[0].case.data_analysis == Pipeline.FASTQ + assert new_samples[0].links[0].case.data_analysis == Workflow.FASTQ def test_store_fastq_samples_non_wgs_as_fastq( @@ -374,7 +374,7 @@ def test_store_fastq_samples_non_wgs_as_fastq( ) # THEN the analysis for the case should be fastq (none) - assert new_samples[0].links[0].case.data_analysis == Pipeline.FASTQ + assert new_samples[0].links[0].case.data_analysis == Workflow.FASTQ def test_store_samples_bad_apptag(orders_api, base_store, fastq_status_data, ticket_id: str): @@ -415,7 +415,7 @@ def test_store_microbial_samples(orders_api, base_store, microbial_status_data, ticket_id=ticket_id, items=microbial_status_data["samples"], comment="", - data_analysis=Pipeline.MICROSALT, + data_analysis=Workflow.MICROSALT, data_delivery=DataDelivery.FASTQ_QC, ) @@ -445,7 +445,7 @@ def test_store_microbial_case_data_analysis_stored( ticket_id=ticket_id, items=microbial_status_data["samples"], comment="", - data_analysis=Pipeline.MICROSALT, + data_analysis=Workflow.MICROSALT, data_delivery=DataDelivery.FASTQ_QC, ) @@ -454,7 +454,7 @@ def test_store_microbial_case_data_analysis_stored( assert base_store._get_query(table=Case).count() == 1 microbial_case = base_store.get_cases()[0] - assert microbial_case.data_analysis == str(Pipeline.MICROSALT) + assert microbial_case.data_analysis == Workflow.MICROSALT assert microbial_case.data_delivery == str(DataDelivery.FASTQ_QC) @@ -474,7 +474,7 @@ def test_store_microbial_sample_priority( ticket_id=ticket_id, items=microbial_status_data["samples"], comment="", - data_analysis=Pipeline.MICROSALT, + data_analysis=Workflow.MICROSALT, data_delivery=DataDelivery.FASTQ_QC, ) @@ -510,7 +510,7 @@ def test_store_mip(orders_api, base_store: Store, mip_status_data, ticket_id: st assert len(new_case.links) == 3 new_link = new_case.links[0] - assert 
+    assert new_case.data_analysis == Workflow.MIP_DNA
     assert new_case.data_delivery == str(DataDelivery.SCOUT)
     assert set(new_case.cohorts) == {"Other"}
     assert (
@@ -563,7 +563,7 @@ def test_store_mip_rna(orders_api, base_store, mip_rna_status_data, ticket_id: s
     assert len(new_casing.links) == 2
     new_link = new_casing.links[0]
 
-    assert new_casing.data_analysis == str(Pipeline.MIP_RNA)
+    assert new_casing.data_analysis == Workflow.MIP_RNA
     assert new_casing.data_delivery == str(DataDelivery.SCOUT)
     assert new_link.sample.name == "sample1-rna-t1"
     assert new_link.sample.application_version.application.tag == rna_application_tag
@@ -638,9 +638,9 @@ def test_store_cancer_samples(
     new_case = new_families[0]
     assert new_case.name == "family1"
     assert new_case.data_analysis in [
-        str(Pipeline.BALSAMIC),
-        str(Pipeline.BALSAMIC_QC),
-        str(Pipeline.BALSAMIC_UMI),
+        Workflow.BALSAMIC,
+        Workflow.BALSAMIC_QC,
+        Workflow.BALSAMIC_UMI,
     ]
     assert new_case.data_delivery == str(DataDelivery.FASTQ_ANALYSIS_SCOUT)
     assert set(new_case.panels) == set()
diff --git a/tests/meta/report/conftest.py b/tests/meta/report/conftest.py
index 4f807b9fcd..d1f179ed53 100644
--- a/tests/meta/report/conftest.py
+++ b/tests/meta/report/conftest.py
@@ -3,7 +3,7 @@
 import pytest
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.constants import FileFormat
 from cg.io.controller import ReadFile
 from cg.meta.report.balsamic import BalsamicReportAPI
@@ -25,7 +25,7 @@ def report_api_mip_dna(
 ) -> MipDNAReportAPI:
     """MIP DNA ReportAPI fixture."""
     cg_context.meta_apis["analysis_api"] = MockMipAnalysis(
-        config=cg_context, pipeline=Pipeline.MIP_DNA
+        config=cg_context, pipeline=Workflow.MIP_DNA
     )
     cg_context.status_db_ = report_store
     cg_context.lims_api_ = MockLimsAPI(cg_context, lims_samples)
@@ -77,7 +77,7 @@ def case_samples_data(case_id: str, report_api_mip_dna: MipDNAReportAPI):
 
 @pytest.fixture(scope="function")
 def mip_analysis_api(cg_context: CGConfig) -> MockMipAnalysis:
     """MIP analysis mock data."""
-    return MockMipAnalysis(config=cg_context, pipeline=Pipeline.MIP_DNA)
+    return MockMipAnalysis(config=cg_context, pipeline=Workflow.MIP_DNA)
 
 
 @pytest.fixture(scope="session")
@@ -99,9 +99,9 @@ def report_store(analysis_store, helpers, timestamp_yesterday):
     """A mock store instance for report testing."""
     case = analysis_store.get_cases()[0]
     helpers.add_analysis(
-        analysis_store, case, pipeline=Pipeline.MIP_DNA, started_at=timestamp_yesterday
+        analysis_store, case, pipeline=Workflow.MIP_DNA, started_at=timestamp_yesterday
     )
-    helpers.add_analysis(analysis_store, case, pipeline=Pipeline.MIP_DNA, started_at=datetime.now())
+    helpers.add_analysis(analysis_store, case, pipeline=Workflow.MIP_DNA, started_at=datetime.now())
     # Mock sample dates to calculate processing times
     for family_sample in analysis_store.get_case_samples_by_case_id(
         case_internal_id=case.internal_id
diff --git a/tests/meta/report/test_report_api.py b/tests/meta/report/test_report_api.py
index ab58537385..deaf7dc343 100644
--- a/tests/meta/report/test_report_api.py
+++ b/tests/meta/report/test_report_api.py
@@ -7,7 +7,7 @@
 import pytest
 from _pytest.logging import LogCaptureFixture
 
-from cg.constants import REPORT_GENDER, Pipeline
+from cg.constants import REPORT_GENDER, Workflow
 from cg.exc import DeliveryReportError
 from cg.meta.report.mip_dna import MipDNAReportAPI
 from cg.meta.workflow.mip_dna import MipDNAAnalysisAPI
@@ -393,7 +393,7 @@ def test_get_case_analysis_data_pipeline_match_error(
     # GIVEN a pre-built case and a MIP-DNA analysis that has been started as Balsamic
     mip_analysis: Analysis = case_mip_dna.analyses[0]
-    mip_analysis.pipeline = Pipeline.BALSAMIC
+    mip_analysis.pipeline = Workflow.BALSAMIC
 
     # GIVEN a mip analysis mock metadata
     mip_metadata: MipAnalysis = mip_analysis_api.get_latest_metadata(case_mip_dna.internal_id)
@@ -406,7 +406,7 @@ def test_get_case_analysis_data_pipeline_match_error(
             case=case_mip_dna, analysis=mip_analysis, analysis_metadata=mip_metadata
         )
         assert (
-            f"The analysis requested by the customer ({Pipeline.MIP_DNA}) does not match the one executed "
+            f"The analysis requested by the customer ({Workflow.MIP_DNA}) does not match the one executed "
             f"({mip_analysis.pipeline})" in caplog.text
         )
@@ -420,9 +420,9 @@ def test_get_case_analysis_data_pipeline_not_supported(
     """Test validation error if the analysis pipeline is not supported by the delivery report workflow."""
 
     # GIVEN a pre-built case with Fluffy as data analysis
-    case_mip_dna.data_analysis = Pipeline.FLUFFY
+    case_mip_dna.data_analysis = Workflow.FLUFFY
     mip_analysis: Analysis = case_mip_dna.analyses[0]
-    mip_analysis.pipeline = Pipeline.FLUFFY
+    mip_analysis.pipeline = Workflow.FLUFFY
 
     # GIVEN a mip analysis mock metadata
     mip_metadata: MipAnalysis = mip_analysis_api.get_latest_metadata(case_mip_dna.internal_id)
diff --git a/tests/meta/rsync/conftest.py b/tests/meta/rsync/conftest.py
index 048a0ef52c..9ec6e5f2ce 100644
--- a/tests/meta/rsync/conftest.py
+++ b/tests/meta/rsync/conftest.py
@@ -2,7 +2,7 @@
 
 import pytest
 
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.models.cg_config import CGConfig
 from cg.store.models import Case
@@ -14,7 +14,7 @@ def mutant_case(cg_context: CGConfig, case_id: str, ticket_id: str, helpers) ->
         store=cg_context.status_db,
         internal_id=case_id,
         name=ticket_id,
-        data_analysis=Pipeline.MUTANT,
+        data_analysis=Workflow.MUTANT,
     )
     return case
@@ -26,7 +26,7 @@ def microsalt_case(cg_context: CGConfig, case_id: str, ticket_id: str, helpers)
         store=cg_context.status_db,
         internal_id=case_id,
         name=ticket_id,
-        data_analysis=Pipeline.MICROSALT,
+        data_analysis=Workflow.MICROSALT,
     )
     return case
diff --git a/tests/meta/upload/conftest.py b/tests/meta/upload/conftest.py
index cfecd62be8..cef428bef7 100644
--- a/tests/meta/upload/conftest.py
+++ b/tests/meta/upload/conftest.py
@@ -7,7 +7,7 @@
 from cg.apps.coverage.api import ChanjoAPI
 from cg.apps.housekeeper.hk import HousekeeperAPI
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.housekeeper_tags import HkMipAnalysisTag
 from cg.meta.upload.coverage import UploadCoverageApi
 from cg.meta.upload.gt import UploadGenotypesAPI
@@ -98,7 +98,7 @@ def mip_dna_case(mip_dna_context: CGConfig, helpers: StoreHelpers) -> Case:
         store=store,
         internal_id="mip-dna-case",
         name="mip-dna-case",
-        data_analysis=Pipeline.MIP_DNA,
+        data_analysis=Workflow.MIP_DNA,
     )
     dna_mip_sample: Sample = helpers.add_sample(
         store=store, application_type="wgs", internal_id="mip-dna-case"
     )
     helpers.add_analysis(
         store=store,
         case=mip_dna_case,
-        pipeline=Pipeline.MIP_DNA,
+        pipeline=Workflow.MIP_DNA,
     )
     return mip_dna_case
@@ -126,5 +126,5 @@ def mip_rna_analysis(mip_rna_context: CGConfig, helpers: StoreHelpers, mip_rna_c
     return helpers.add_analysis(
         store=mip_rna_context.status_db,
         case=mip_rna_case,
-        pipeline=Pipeline.MIP_RNA,
+        pipeline=Workflow.MIP_RNA,
     )
diff --git a/tests/meta/upload/scout/conftest.py b/tests/meta/upload/scout/conftest.py
index 936fc49d0c..f7c841701a 100644
--- a/tests/meta/upload/scout/conftest.py
+++ b/tests/meta/upload/scout/conftest.py
@@ -8,7 +8,7 @@
 import pytest
 from housekeeper.store.models import Version
 
-from cg.constants import DataDelivery, Pipeline
+from cg.constants import DataDelivery, Workflow
 from cg.constants.constants import FileFormat, PrepCategory
 from cg.constants.housekeeper_tags import HK_DELIVERY_REPORT_TAG
 from cg.constants.scout import UploadTrack
@@ -103,7 +103,7 @@ def rna_store(
         store=store,
         case_name="rna_case",
         customer=helpers.ensure_customer(store=store),
-        data_analysis=Pipeline.MIP_RNA,
+        data_analysis=Workflow.MIP_RNA,
         data_delivery=DataDelivery.SCOUT,
     )
     rna_case.internal_id = rna_case_id
@@ -154,7 +154,7 @@ def rna_store(
         store=store,
         case_name="dna_case",
         customer=helpers.ensure_customer(store=store),
-        data_analysis=Pipeline.MIP_DNA,
+        data_analysis=Workflow.MIP_DNA,
         data_delivery=DataDelivery.SCOUT,
     )
     dna_case.internal_id = dna_case_id
@@ -508,7 +508,7 @@ def mip_dna_analysis(
         store=analysis_store_trio,
         case=case,
         started_at=timestamp,
-        pipeline=Pipeline.MIP_DNA,
+        pipeline=Workflow.MIP_DNA,
         completed_at=timestamp,
     )
     for link in case.links:
@@ -527,24 +527,24 @@ def mip_dna_analysis(
 @pytest.fixture(name="balsamic_analysis_obj")
 def balsamic_analysis_obj(analysis_obj: Analysis) -> Analysis:
     """Return a Balsamic analysis object."""
-    analysis_obj.pipeline = Pipeline.BALSAMIC
+    analysis_obj.pipeline = Workflow.BALSAMIC
     for link_object in analysis_obj.case.links:
         link_object.sample.application_version.application.prep_category = (
             PrepCategory.WHOLE_EXOME_SEQUENCING
         )
-        link_object.case.data_analysis = Pipeline.BALSAMIC
+        link_object.case.data_analysis = Workflow.BALSAMIC
     return analysis_obj
 
 
 @pytest.fixture(name="balsamic_umi_analysis_obj")
 def balsamic_umi_analysis_obj(analysis_obj: Analysis) -> Analysis:
     """Return a Balsamic UMI analysis object."""
-    analysis_obj.pipeline = Pipeline.BALSAMIC_UMI
+    analysis_obj.pipeline = Workflow.BALSAMIC_UMI
     for link_object in analysis_obj.case.links:
         link_object.sample.application_version.application.prep_category = (
             PrepCategory.WHOLE_EXOME_SEQUENCING
         )
-        link_object.case.data_analysis = Pipeline.BALSAMIC_UMI
+        link_object.case.data_analysis = Workflow.BALSAMIC_UMI
     return analysis_obj
@@ -552,12 +552,12 @@ def balsamic_umi_analysis_obj(analysis_obj: Analysis) -> Analysis:
 @pytest.fixture(name="rnafusion_analysis_obj")
 def rnafusion_analysis_obj(analysis_obj: Analysis) -> Analysis:
     """Return a RNAfusion analysis object."""
-    analysis_obj.pipeline = Pipeline.RNAFUSION
+    analysis_obj.pipeline = Workflow.RNAFUSION
     for link_object in analysis_obj.case.links:
         link_object.sample.application_version.application.prep_category = (
             PrepCategory.WHOLE_TRANSCRIPTOME_SEQUENCING
         )
-        link_object.case.data_analysis = Pipeline.RNAFUSION
+        link_object.case.data_analysis = Workflow.RNAFUSION
         link_object.case.panels = None
     return analysis_obj
@@ -621,7 +621,7 @@ def lims_api(lims_samples: list[dict]) -> MockLimsAPI:
 @pytest.fixture(name="mip_analysis_api")
 def mip_analysis_api(cg_context: CGConfig) -> MockMipAnalysis:
     """Return a MIP analysis API."""
-    return MockMipAnalysis(config=cg_context, pipeline=Pipeline.MIP_DNA)
+    return MockMipAnalysis(config=cg_context, pipeline=Workflow.MIP_DNA)
@@ -634,7 +634,7 @@ def upload_scout_api(
     store: Store,
 ) -> UploadScoutAPI:
     """Return upload Scout API."""
-    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Pipeline.MIP_DNA)
+    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Workflow.MIP_DNA)
     lims_api = MockLimsAPI(samples=lims_samples)
 
     return UploadScoutAPI(
@@ -657,7 +657,7 @@ def upload_mip_analysis_scout_api(
     store: Store,
 ) -> Generator[UploadScoutAPI, None, None]:
     """Return MIP upload Scout API."""
-    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Pipeline.MIP_DNA)
+    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Workflow.MIP_DNA)
     lims_api = MockLimsAPI(samples=lims_samples)
 
     yield UploadScoutAPI(
@@ -680,7 +680,7 @@ def upload_balsamic_analysis_scout_api(
     store: Store,
 ) -> Generator[UploadScoutAPI, None, None]:
     """Return Balsamic upload Scout API."""
-    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Pipeline.MIP_DNA)
+    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Workflow.MIP_DNA)
     lims_api = MockLimsAPI(samples=lims_samples)
 
     yield UploadScoutAPI(
@@ -721,7 +721,7 @@ def upload_rnafusion_analysis_scout_api(
     store: Store,
 ) -> UploadScoutAPI:
     """Fixture for upload_scout_api."""
-    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Pipeline.MIP_DNA)
+    analysis_mock = MockMipAnalysis(config=cg_context, pipeline=Workflow.MIP_DNA)
     lims_api = MockLimsAPI(samples=lims_samples)
 
     _api = UploadScoutAPI(
diff --git a/tests/meta/upload/scout/test_generate_load_config.py b/tests/meta/upload/scout/test_generate_load_config.py
index 23ddd052af..557ee00c3f 100644
--- a/tests/meta/upload/scout/test_generate_load_config.py
+++ b/tests/meta/upload/scout/test_generate_load_config.py
@@ -2,7 +2,7 @@
 
 import pytest
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.meta.upload.scout.mip_config_builder import MipConfigBuilder
 from cg.meta.upload.scout.uploadscoutapi import UploadScoutAPI
 from cg.models.scout.scout_load_config import (
@@ -44,7 +44,7 @@ def test_generate_balsamic_load_config(
     balsamic_analysis_obj: Analysis, upload_balsamic_analysis_scout_api: UploadScoutAPI
 ):
     # GIVEN an analysis object that have been run with balsamic
-    assert balsamic_analysis_obj.pipeline == Pipeline.BALSAMIC
+    assert balsamic_analysis_obj.pipeline == Workflow.BALSAMIC
 
     # GIVEN an upload scout api with some balsamic information
@@ -59,7 +59,7 @@ def test_generate_balsamic_umi_load_config(
     balsamic_umi_analysis_obj: Analysis, upload_balsamic_analysis_scout_api: UploadScoutAPI
 ):
     # GIVEN an analysis object that have been run with balsamic-umi
-    assert balsamic_umi_analysis_obj.pipeline == Pipeline.BALSAMIC_UMI
+    assert balsamic_umi_analysis_obj.pipeline == Workflow.BALSAMIC_UMI
 
     # GIVEN an upload scout api with some balsamic information
@@ -77,7 +77,7 @@ def test_generate_rnafusion_load_config(
 ):
     """Test that a rnafusion config is generated."""
     # GIVEN an analysis object that have been run with rnafusion
-    assert rnafusion_analysis_obj.pipeline == Pipeline.RNAFUSION
+    assert rnafusion_analysis_obj.pipeline == Workflow.RNAFUSION
 
     # GIVEN an upload scout api with some rnafusion information
diff --git a/tests/meta/upload/scout/test_meta_upload_scoutapi_rna.py b/tests/meta/upload/scout/test_meta_upload_scoutapi_rna.py
index b91767968d..47f36fa311 100644
--- a/tests/meta/upload/scout/test_meta_upload_scoutapi_rna.py
+++ b/tests/meta/upload/scout/test_meta_upload_scoutapi_rna.py
@@ -9,7 +9,7 @@
 import cg.store as Store
 from cg.apps.housekeeper.hk import HousekeeperAPI
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.scout import ScoutCustomCaseReportTags
 from cg.constants.sequencing import SequencingMethod
 from cg.exc import CgDataError
@@ -64,7 +64,7 @@ def ensure_extra_rna_case_match(
     """Ensures that we have an extra RNA case that matches by subject_id the existing RNA case and DNA cases."""
     rna_extra_case = helpers.ensure_case(
         store=rna_store,
-        data_analysis=Pipeline.MIP_RNA,
+        data_analysis=Workflow.MIP_RNA,
         customer=rna_store.get_case_by_internal_id(rna_case_id).customer,
     )
     subject_id: str = get_subject_id_from_case(store=rna_store, case_id=rna_case_id)
@@ -620,7 +620,7 @@ def test_map_dna_cases_to_dna_sample_incorrect_pipeline(
     rna_sample: Sample = rna_store.get_sample_by_internal_id(rna_sample_son_id)
 
     # GIVEN that the DNA case has a different pipeline than the expected pipeline
-    dna_case.data_analysis = Pipeline.FASTQ
+    dna_case.data_analysis = Workflow.FASTQ
 
     # WHEN mapping the DNA case name to the DNA sample name in the related DNA cases
     related_dna_cases: list[str] = upload_scout_api._dna_cases_related_to_dna_sample(
diff --git a/tests/meta/workflow/conftest.py b/tests/meta/workflow/conftest.py
index c9f6288378..a03b10aae0 100644
--- a/tests/meta/workflow/conftest.py
+++ b/tests/meta/workflow/conftest.py
@@ -6,7 +6,7 @@
 import pytest
 
-from cg.constants.constants import CaseActions, MicrosaltAppTags, MicrosaltQC, Pipeline
+from cg.constants.constants import CaseActions, MicrosaltAppTags, MicrosaltQC, Workflow
 from cg.meta.compress.compress import CompressAPI
 from cg.meta.workflow.microsalt import MicrosaltAnalysisAPI
 from cg.meta.workflow.mip_dna import MipDNAAnalysisAPI
@@ -193,7 +193,7 @@ def qc_microsalt_context(
         store=store,
         internal_id=microsalt_case_qc_pass,
         name=microsalt_case_qc_pass,
-        data_analysis=Pipeline.MICROSALT,
+        data_analysis=Workflow.MICROSALT,
         action=CaseActions.RUNNING,
     )
@@ -228,7 +228,7 @@ def qc_microsalt_context(
         store=store,
         internal_id=microsalt_case_qc_fail,
         name=microsalt_case_qc_fail,
-        data_analysis=Pipeline.MICROSALT,
+        data_analysis=Workflow.MICROSALT,
     )
 
     for sample in qc_fail_microsalt_samples:
diff --git a/tests/models/balsamic/test_balsamic_analysis.py b/tests/models/balsamic/test_balsamic_analysis.py
index ca04693835..103bb37b22 100644
--- a/tests/models/balsamic/test_balsamic_analysis.py
+++ b/tests/models/balsamic/test_balsamic_analysis.py
@@ -1,4 +1,4 @@
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.meta.workflow.balsamic import BalsamicAnalysisAPI
 from cg.models.balsamic.analysis import BalsamicAnalysis
 from cg.models.balsamic.config import BalsamicConfigJSON
@@ -9,7 +9,7 @@ def test_instantiate_balsamic_analysis(cg_context, balsamic_config_raw, balsamic
     """Tests BALSAMIC analysis instance creation"""
 
     # GIVEN a config and metrics dictionaries and a BALSAMIC analysis API
-    balsamic_analysis_api = BalsamicAnalysisAPI(cg_context, Pipeline.BALSAMIC)
+    balsamic_analysis_api = BalsamicAnalysisAPI(cg_context, Workflow.BALSAMIC)
 
     # WHEN instantiating a BALSAMIC analysis object
     balsamic_analysis = balsamic_analysis_api.parse_analysis(
diff --git a/tests/models/mip/test_mip_analysis.py b/tests/models/mip/test_mip_analysis.py
index f18f412971..8fd94c2f11 100644
--- a/tests/models/mip/test_mip_analysis.py
+++ b/tests/models/mip/test_mip_analysis.py
@@ -1,4 +1,4 @@
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.meta.workflow.mip import MipAnalysisAPI
 from cg.models.mip.mip_analysis import MipAnalysis
@@ -26,7 +26,7 @@ def test_instantiate_parse_mip_analysis(
     Tests parse_analysis
     """
     # GIVEN a dictionary with some metrics and a MIP analysis API
-    mip_analysis_api = MipAnalysisAPI(cg_context, Pipeline.MIP_DNA)
+    mip_analysis_api = MipAnalysisAPI(cg_context, Workflow.MIP_DNA)
 
     # WHEN instantiating a MipAnalysis object
     mip_dna_analysis = mip_analysis_api.parse_analysis(
diff --git a/tests/models/report/test_validators.py b/tests/models/report/test_validators.py
index 99313c3c93..51b7e01cf1 100644
--- a/tests/models/report/test_validators.py
+++ b/tests/models/report/test_validators.py
@@ -8,7 +8,7 @@
 from _pytest.logging import LogCaptureFixture
 from pydantic import ValidationInfo
 
-from cg.constants import NA_FIELD, NO_FIELD, REPORT_GENDER, YES_FIELD, Pipeline
+from cg.constants import NA_FIELD, NO_FIELD, REPORT_GENDER, YES_FIELD, Workflow
 from cg.constants.constants import AnalysisType
 from cg.constants.subject import Sex
 from cg.models.orders.constants import OrderType
@@ -186,7 +186,7 @@ def test_get_analysis_type_as_string():
     # GIVEN a WGS analysis type and a model info dictionary
     analysis_type: AnalysisType = AnalysisType.WHOLE_GENOME_SEQUENCING
     model_info: ValidationInfo = ValidationInfo
-    model_info.data: dict[str, Any] = {"pipeline": Pipeline.MIP_DNA.value}
+    model_info.data: dict[str, Any] = {"pipeline": Workflow.MIP_DNA.value}
 
     # WHEN performing the validation
     validated_analysis_type: str = get_analysis_type_as_string(
@@ -203,7 +203,7 @@ def test_get_analysis_type_as_string_balsamic():
     # GIVEN a WGS analysis type and a model info dictionary
     analysis_type: str = "tumor_normal_wgs"
     model_info: ValidationInfo = ValidationInfo
-    model_info.data: dict[str, Any] = {"pipeline": Pipeline.BALSAMIC.value}
+    model_info.data: dict[str, Any] = {"pipeline": Workflow.BALSAMIC.value}
 
     # WHEN performing the validation
     validated_analysis_type: str = get_analysis_type_as_string(
diff --git a/tests/server/conftest.py b/tests/server/conftest.py
index 559af59e0f..284d031a50 100644
--- a/tests/server/conftest.py
+++ b/tests/server/conftest.py
@@ -9,7 +9,7 @@
 from flask.testing import FlaskClient
 from mock import patch
 
-from cg.constants import DataDelivery, Pipeline
+from cg.constants import DataDelivery, Workflow
 from cg.server.ext import db as store
 from cg.store.database import create_all_tables, drop_all_tables
 from cg.store.models import Case, Customer, Order
@@ -38,7 +38,7 @@ def app() -> Generator[Flask, None, None]:
 def case(helpers: StoreHelpers) -> Case:
     case: Case = helpers.add_case(
         customer_id=1,
-        data_analysis=Pipeline.MIP_DNA,
+        data_analysis=Workflow.MIP_DNA,
         data_delivery=DataDelivery.ANALYSIS_SCOUT,
         name="test case",
         ticket="123",
@@ -85,7 +85,7 @@ def order_balsamic(helpers: StoreHelpers, customer_another: Customer) -> Order:
         customer_id=customer_another.id,
         ticket_id=3,
         order_date=datetime.now(),
-        workflow=Pipeline.BALSAMIC,
+        workflow=Workflow.BALSAMIC,
     )
     return order
diff --git a/tests/server/endpoints/test_orders_endpoint.py b/tests/server/endpoints/test_orders_endpoint.py
index 2e2055aecd..688be9a61c 100644
--- a/tests/server/endpoints/test_orders_endpoint.py
+++ b/tests/server/endpoints/test_orders_endpoint.py
@@ -3,18 +3,18 @@
 import pytest
 from flask.testing import FlaskClient
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.store.models import Order
 
 
 @pytest.mark.parametrize(
     "limit, workflow, expected_orders",
     [
-        (None, Pipeline.MIP_DNA, 2),
-        (1, Pipeline.MIP_DNA, 1),
-        (2, Pipeline.MIP_DNA, 2),
-        (None, Pipeline.BALSAMIC, 1),
-        (None, Pipeline.FLUFFY, 0),
+        (None, Workflow.MIP_DNA, 2),
+        (1, Workflow.MIP_DNA, 1),
+        (2, Workflow.MIP_DNA, 2),
+        (None, Workflow.BALSAMIC, 1),
+        (None, Workflow.FLUFFY, 0),
         (None, None, 3),
     ],
 )
diff --git a/tests/store/api/conftest.py b/tests/store/api/conftest.py
index b734031544..399be74b55 100644
--- a/tests/store/api/conftest.py
+++ b/tests/store/api/conftest.py
@@ -2,7 +2,7 @@
 
 import pytest
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.constants import PrepCategory
 from cg.constants.subject import PhenotypeStatus
 from cg.store.models import CaseSample
@@ -24,7 +24,7 @@ def store_failing_sequencing_qc(
         store=store,
         internal_id="fluffy_case",
         name="fluffy_case",
-        data_analysis=Pipeline.FLUFFY,
+        data_analysis=Workflow.FLUFFY,
     )
 
     store_sample = helpers.add_sample(
diff --git a/tests/store/conftest.py b/tests/store/conftest.py
index d15fb7a922..f1ef329851 100644
--- a/tests/store/conftest.py
+++ b/tests/store/conftest.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.priority import PriorityTerms
 from cg.constants.subject import PhenotypeStatus, Sex
 from cg.meta.orders.pool_submitter import PoolSubmitter
@@ -115,7 +115,7 @@ def _get_item(name: str, internal_id: str, well_position: str, organism: str) ->
         organism=organism,
         reference_genome=ref_genomes[organism],
         extraction_method="MagNaPure 96 (contact Clinical Genomics before " "submission)",
-        analysis=str(Pipeline.FASTQ),
+        analysis=Workflow.FASTQ,
         concentration_sample="1",
         mother=None,
         father=None,
@@ -326,9 +326,9 @@ def store_with_application_limitations(
         application=store_with_an_application_with_and_without_attributes.get_application_by_tag(
             StoreConstants.TAG_APPLICATION_WITH_ATTRIBUTES.value
         ),
-        pipeline=Pipeline.MIP_DNA,
+        pipeline=Workflow.MIP_DNA,
     )
-    for pipeline in [Pipeline.MIP_DNA, Pipeline.BALSAMIC]:
+    for pipeline in [Workflow.MIP_DNA, Workflow.BALSAMIC]:
         helpers.ensure_application_limitation(
             store=store_with_an_application_with_and_without_attributes,
             application=store_with_an_application_with_and_without_attributes.get_application_by_tag(
@@ -415,7 +415,7 @@ def store_with_older_and_newer_analyses(
     helpers.add_analysis(
         store=base_store,
         case=case,
-        pipeline=Pipeline.BALSAMIC,
+        pipeline=Workflow.BALSAMIC,
         started_at=time,
         completed_at=time,
         uploaded_at=time,
@@ -557,7 +557,7 @@ def store_with_analyses_for_cases_not_uploaded_fluffy(
            started_at=timestamp_yesterday,
            uploaded_at=timestamp_yesterday,
            delivery_reported_at=None,
-            pipeline=Pipeline.FLUFFY,
+            pipeline=Workflow.FLUFFY,
        )
        helpers.add_analysis(
            analysis_store,
            started_at=timestamp_now,
            uploaded_at=None,
            delivery_reported_at=None,
-            pipeline=Pipeline.FLUFFY,
+            pipeline=Workflow.FLUFFY,
        )
        sample = helpers.add_sample(analysis_store, delivered_at=timestamp_now)
        link: CaseSample = analysis_store.relate_sample(
diff --git a/tests/store/crud/conftest.py b/tests/store/crud/conftest.py
index 2ab1ea8842..99a23504a5 100644
--- a/tests/store/crud/conftest.py
+++ b/tests/store/crud/conftest.py
@@ -3,7 +3,7 @@
 
 import pytest
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.constants import CustomerId, PrepCategory
 from cg.constants.subject import PhenotypeStatus
 from cg.store.models import CaseSample, Order
@@ -172,7 +172,7 @@ def store_with_analyses_for_cases_not_uploaded_microsalt(
            started_at=timestamp_yesterday,
            uploaded_at=timestamp_yesterday,
            delivery_reported_at=None,
-            pipeline=Pipeline.MICROSALT,
+            pipeline=Workflow.MICROSALT,
        )
        helpers.add_analysis(
            analysis_store,
@@ -180,7 +180,7 @@ def store_with_analyses_for_cases_not_uploaded_microsalt(
            started_at=timestamp_now,
            uploaded_at=None,
            delivery_reported_at=None,
-            pipeline=Pipeline.MICROSALT,
+            pipeline=Workflow.MICROSALT,
        )
        sample = helpers.add_sample(analysis_store, delivered_at=timestamp_now)
        link: CaseSample = analysis_store.relate_sample(
@@ -210,7 +210,7 @@ def store_with_analyses_for_cases_to_deliver(
            uploaded_at=None,
            delivery_reported_at=None,
            completed_at=timestamp_yesterday,
-            pipeline=Pipeline.FLUFFY,
+            pipeline=Workflow.FLUFFY,
        )
        helpers.add_analysis(
            analysis_store,
@@ -219,7 +219,7 @@ def store_with_analyses_for_cases_to_deliver(
            uploaded_at=None,
            delivery_reported_at=None,
            completed_at=timestamp_now,
-            pipeline=Pipeline.MIP_DNA,
+            pipeline=Workflow.MIP_DNA,
        )
        sample = helpers.add_sample(analysis_store, delivered_at=None)
        link: CaseSample = analysis_store.relate_sample(
@@ -265,7 +265,7 @@ def re_sequenced_sample_store(
         store=re_sequenced_sample_store,
         internal_id=case_id,
         name=family_name,
-        data_analysis=Pipeline.FLUFFY,
+        data_analysis=Workflow.FLUFFY,
     )
 
     store_sample = helpers.add_sample(
@@ -408,6 +408,6 @@ def order_balsamic(helpers: StoreHelpers, store: Store) -> Order:
         customer_id=2,
         ticket_id=3,
         order_date=datetime.now(),
-        workflow=Pipeline.BALSAMIC,
+        workflow=Workflow.BALSAMIC,
     )
     return order
diff --git a/tests/store/crud/read/test_read.py b/tests/store/crud/read/test_read.py
index 973d7370f3..85d9727ad1 100644
--- a/tests/store/crud/read/test_read.py
+++ b/tests/store/crud/read/test_read.py
@@ -5,7 +5,7 @@
 from sqlalchemy.orm import Query
 
 from cg.constants import FlowCellStatus, Priority
-from cg.constants.constants import CaseActions, MicrosaltAppTags, Pipeline
+from cg.constants.constants import CaseActions, MicrosaltAppTags, Workflow
 from cg.constants.subject import PhenotypeStatus
 from cg.exc import CgError
 from cg.store.models import (
@@ -122,7 +122,7 @@ def test_case_in_uploaded_observations(helpers: StoreHelpers, sample_store: Stor
     """Test retrieval of uploaded observations."""
 
     # GIVEN a case with observations that has been uploaded to Loqusdb
-    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Pipeline.MIP_DNA)
+    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Workflow.MIP_DNA)
     analysis.case.customer.loqus_upload = True
     sample: Sample = helpers.add_sample(sample_store, loqusdb_id=loqusdb_id)
     link = sample_store.relate_sample(analysis.case, sample, PhenotypeStatus.UNKNOWN)
@@ -142,7 +142,7 @@ def test_case_not_in_uploaded_observations(helpers: StoreHelpers, sample_store:
     """Test retrieval of uploaded observations that have not been uploaded to Loqusdb."""
 
     # GIVEN a case with observations that has not been uploaded to loqusdb
-    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Pipeline.MIP_DNA)
+    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Workflow.MIP_DNA)
     analysis.case.customer.loqus_upload = True
     sample: Sample = helpers.add_sample(sample_store)
     link = sample_store.relate_sample(analysis.case, sample, PhenotypeStatus.UNKNOWN)
@@ -162,7 +162,7 @@ def test_case_in_observations_to_upload(helpers: StoreHelpers, sample_store: Sto
     """Test extraction of ready to be uploaded to Loqusdb cases."""
 
     # GIVEN a case with completed analysis and samples w/o loqusdb_id
-    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Pipeline.MIP_DNA)
+    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Workflow.MIP_DNA)
diff --git a/tests/store/crud/read/test_read.py b/tests/store/crud/read/test_read.py
index 973d7370f3..85d9727ad1 100644
--- a/tests/store/crud/read/test_read.py
+++ b/tests/store/crud/read/test_read.py
@@ -5,7 +5,7 @@
 from sqlalchemy.orm import Query
 
 from cg.constants import FlowCellStatus, Priority
-from cg.constants.constants import CaseActions, MicrosaltAppTags, Pipeline
+from cg.constants.constants import CaseActions, MicrosaltAppTags, Workflow
 from cg.constants.subject import PhenotypeStatus
 from cg.exc import CgError
 from cg.store.models import (
@@ -122,7 +122,7 @@ def test_case_in_uploaded_observations(helpers: StoreHelpers, sample_store: Stor
     """Test retrieval of uploaded observations."""
 
     # GIVEN a case with observations that has been uploaded to Loqusdb
-    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Pipeline.MIP_DNA)
+    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Workflow.MIP_DNA)
     analysis.case.customer.loqus_upload = True
     sample: Sample = helpers.add_sample(sample_store, loqusdb_id=loqusdb_id)
     link = sample_store.relate_sample(analysis.case, sample, PhenotypeStatus.UNKNOWN)
@@ -142,7 +142,7 @@ def test_case_not_in_uploaded_observations(helpers: StoreHelpers, sample_store:
     """Test retrieval of uploaded observations that have not been uploaded to Loqusdb."""
 
     # GIVEN a case with observations that has not been uploaded to loqusdb
-    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Pipeline.MIP_DNA)
+    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Workflow.MIP_DNA)
     analysis.case.customer.loqus_upload = True
     sample: Sample = helpers.add_sample(sample_store)
     link = sample_store.relate_sample(analysis.case, sample, PhenotypeStatus.UNKNOWN)
@@ -162,7 +162,7 @@ def test_case_in_observations_to_upload(helpers: StoreHelpers, sample_store: Sto
     """Test extraction of ready to be uploaded to Loqusdb cases."""
 
     # GIVEN a case with completed analysis and samples w/o loqusdb_id
-    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Pipeline.MIP_DNA)
+    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Workflow.MIP_DNA)
     analysis.case.customer.loqus_upload = True
     sample: Sample = helpers.add_sample(sample_store)
     link = sample_store.relate_sample(analysis.case, sample, PhenotypeStatus.UNKNOWN)
@@ -184,7 +184,7 @@ def test_case_not_in_observations_to_upload(
     """Test case extraction that should not be uploaded to Loqusdb."""
 
     # GIVEN a case with completed analysis and samples with a Loqusdb ID
-    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Pipeline.MIP_DNA)
+    analysis: Analysis = helpers.add_analysis(store=sample_store, pipeline=Workflow.MIP_DNA)
     analysis.case.customer.loqus_upload = True
     sample: Sample = helpers.add_sample(sample_store, loqusdb_id=loqusdb_id)
     link = sample_store.relate_sample(analysis.case, sample, PhenotypeStatus.UNKNOWN)
@@ -231,7 +231,7 @@ def test_analyses_to_upload_when_no_pipeline(helpers, sample_store, timestamp):
 def test_analyses_to_upload_when_analysis_has_pipeline(helpers, sample_store, timestamp):
     """Test analyses to upload to when existing pipeline."""
     # GIVEN a store with an analysis that has been run with MIP
-    helpers.add_analysis(store=sample_store, completed_at=timestamp, pipeline=Pipeline.MIP_DNA)
+    helpers.add_analysis(store=sample_store, completed_at=timestamp, pipeline=Workflow.MIP_DNA)
 
     # WHEN fetching all analyses that are ready for upload and analysed with MIP
     records: list[Analysis] = [
@@ -245,7 +245,7 @@ def test_analyses_to_upload_when_analysis_has_pipeline(helpers, sample_store, ti
 def test_analyses_to_upload_when_filtering_with_pipeline(helpers, sample_store, timestamp):
     """Test analyses to upload to when existing pipeline and using it in filtering."""
     # GIVEN a store with an analysis that is analysed with MIP
-    pipeline = Pipeline.MIP_DNA
+    pipeline = Workflow.MIP_DNA
     helpers.add_analysis(store=sample_store, completed_at=timestamp, pipeline=pipeline)
 
     # WHEN fetching all pipelines that are analysed with MIP
@@ -261,7 +261,7 @@ def test_analyses_to_upload_when_filtering_with_pipeline(helpers, sample_store,
 def test_analyses_to_upload_with_pipeline_and_no_complete_at(helpers, sample_store, timestamp):
     """Test analyses to upload to when existing pipeline and using it in filtering."""
     # GIVEN a store with an analysis that is analysed with MIP but does not have a completed_at
-    pipeline = Pipeline.MIP_DNA
+    pipeline = Workflow.MIP_DNA
     helpers.add_analysis(store=sample_store, completed_at=None, pipeline=pipeline)
 
     # WHEN fetching all analyses that are ready for upload and analysed by MIP
@@ -276,12 +276,12 @@ def test_analyses_to_upload_with_pipeline_and_no_complete_at(helpers, sample_sto
 def test_analyses_to_upload_when_filtering_with_missing_pipeline(helpers, sample_store, timestamp):
     """Test analyses to upload to when missing pipeline and using it in filtering."""
     # GIVEN a store with an analysis that has been analysed with "missing_pipeline"
-    helpers.add_analysis(store=sample_store, completed_at=timestamp, pipeline=Pipeline.MIP_DNA)
+    helpers.add_analysis(store=sample_store, completed_at=timestamp, pipeline=Workflow.MIP_DNA)
 
     # WHEN fetching all analyses that was analysed with MIP
     records: list[Analysis] = [
         analysis_obj
-        for analysis_obj in sample_store.get_analyses_to_upload(pipeline=Pipeline.FASTQ)
+        for analysis_obj in sample_store.get_analyses_to_upload(pipeline=Workflow.FASTQ)
     ]
 
     # THEN no analysis object should be returned, since there were no MIP analyses
@@ -920,7 +920,7 @@ def test_get_application_limitations_by_tag(
 
 def test_get_application_limitation_by_tag_and_pipeline(
     store_with_application_limitations: Store,
     tag: str = StoreConstants.TAG_APPLICATION_WITH_ATTRIBUTES.value,
-    pipeline: Pipeline = Pipeline.MIP_DNA,
+    pipeline: Workflow = Workflow.MIP_DNA,
 ) -> ApplicationLimitations:
     """Test get application limitations by application tag and pipeline."""
@@ -1554,9 +1554,9 @@ def test_get_orders_workflow_filter(
     # GIVEN a store with three orders, one of which is a Balsamic order
 
     # WHEN fetching only balsamic orders
-    orders: list[Order] = store.get_orders_by_workflow(workflow=Pipeline.BALSAMIC)
+    orders: list[Order] = store.get_orders_by_workflow(workflow=Workflow.BALSAMIC)
 
     # THEN only one should be returned
-    assert len(orders) == 1 and orders[0].workflow == Pipeline.BALSAMIC
+    assert len(orders) == 1 and orders[0].workflow == Workflow.BALSAMIC
 
 
 @pytest.mark.parametrize(
@@ -1579,7 +1579,7 @@ def test_get_orders_mip_dna_and_limit_filter(
     # GIVEN a store with three orders, two of which are MIP-DNA orders
 
     # WHEN fetching only MIP-DNA orders
-    orders: list[Order] = store.get_orders_by_workflow(workflow=Pipeline.MIP_DNA, limit=limit)
+    orders: list[Order] = store.get_orders_by_workflow(workflow=Workflow.MIP_DNA, limit=limit)
 
     # THEN we should get the expected number of orders returned
     assert len(orders) == expected_returned
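
Written out once, the upload-filtering call these test_read.py tests keep exercising; nothing beyond the signatures visible in this diff is assumed (sample_store is the Store fixture used above):

from cg.constants import Workflow
from cg.store.models import Analysis

# Analyses that are completed and ready for upload, restricted to one workflow.
mip_records: list[Analysis] = list(sample_store.get_analyses_to_upload(pipeline=Workflow.MIP_DNA))

# Restricting to a workflow no analysis was run with yields an empty result,
# as in test_analyses_to_upload_when_filtering_with_missing_pipeline above.
fastq_records: list[Analysis] = list(sample_store.get_analyses_to_upload(pipeline=Workflow.FASTQ))
assert not fastq_records
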
diff --git a/tests/store/crud/read/test_read_analyses_to_clean.py b/tests/store/crud/read/test_read_analyses_to_clean.py
index 81e8ce248a..3a160b0442 100644
--- a/tests/store/crud/read/test_read_analyses_to_clean.py
+++ b/tests/store/crud/read/test_read_analyses_to_clean.py
@@ -2,7 +2,7 @@
 
 from datetime import datetime
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.store.models import CaseSample
 from cg.store.store import Store
@@ -58,7 +58,7 @@ def test_pipeline_included(
     """Tests that analyses that are included depending on pipeline."""
 
     # GIVEN an analysis that is uploaded and pipeline is specified
-    pipeline = Pipeline.BALSAMIC
+    pipeline = Workflow.BALSAMIC
     analysis = helpers.add_analysis(
         analysis_store,
         pipeline=pipeline,
@@ -86,8 +86,8 @@ def test_pipeline_excluded(analysis_store: Store, helpers, timestamp_now: dateti
 
     # GIVEN an analysis that is uploaded
-    used_pipeline = Pipeline.BALSAMIC
-    wrong_pipeline = Pipeline.MIP_DNA
+    used_pipeline = Workflow.BALSAMIC
+    wrong_pipeline = Workflow.MIP_DNA
     analysis = helpers.add_analysis(
         analysis_store,
         pipeline=used_pipeline,
diff --git a/tests/store/crud/read/test_read_analyses_to_delivery_report.py b/tests/store/crud/read/test_read_analyses_to_delivery_report.py
index be7ecfcbae..5fbe4508b9 100644
--- a/tests/store/crud/read/test_read_analyses_to_delivery_report.py
+++ b/tests/store/crud/read/test_read_analyses_to_delivery_report.py
@@ -1,6 +1,6 @@
 """This file tests the analyses_to_delivery_report part of the status api"""
 
-from cg.constants import DataDelivery, Pipeline
+from cg.constants import DataDelivery, Workflow
 from cg.constants.subject import PhenotypeStatus
 from cg.store.models import CaseSample
 from cg.store.store import Store
@@ -12,7 +12,7 @@ def test_missing(analysis_store: Store, helpers: StoreHelpers, timestamp_now):
     """Tests that analyses that are completed, but lacks delivery report are returned."""
 
     # GIVEN an analysis that is delivered but has no delivery report
-    pipeline = Pipeline.BALSAMIC
+    pipeline = Workflow.BALSAMIC
     analysis = helpers.add_analysis(
         analysis_store,
         started_at=timestamp_now,
@@ -43,7 +43,7 @@ def test_outdated_analysis(
 
     # GIVEN an analysis that is older than Hasta
     timestamp_old_analysis = get_date("2017-09-26")
-    pipeline = Pipeline.BALSAMIC
+    pipeline = Workflow.BALSAMIC
 
     # GIVEN a delivery report created at date which is older than the upload date to trigger delivery report generation
 
@@ -78,7 +78,7 @@ def test_analyses_to_upload_delivery_reports(
     """Tests extraction of analyses ready for delivery report upload"""
 
     # GIVEN an analysis that has a delivery report generated
-    pipeline = Pipeline.BALSAMIC
+    pipeline = Workflow.BALSAMIC
     analysis = helpers.add_analysis(
         analysis_store,
         started_at=timestamp_now,
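
The three delivery-report tests above all arrange the same precondition. Spelled out, with every keyword taken from add_analysis calls elsewhere in this patch (the timestamps come from the usual fixtures):

from cg.constants import Workflow

# An analysis that has been uploaded but never delivery-reported: the shape
# the analyses_to_delivery_report query is expected to pick up.
analysis = helpers.add_analysis(
    analysis_store,
    started_at=timestamp_now,
    uploaded_at=timestamp_now,
    delivery_reported_at=None,
    pipeline=Workflow.BALSAMIC,
)
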
pipeline = Pipeline.BALSAMIC + pipeline = Workflow.BALSAMIC # GIVEN a delivery report created at date which is older than the upload date to trigger delivery report generation @@ -78,7 +78,7 @@ def test_analyses_to_upload_delivery_reports( """Tests extraction of analyses ready for delivery report upload""" # GIVEN an analysis that has a delivery report generated - pipeline = Pipeline.BALSAMIC + pipeline = Workflow.BALSAMIC analysis = helpers.add_analysis( analysis_store, started_at=timestamp_now, diff --git a/tests/store/crud/read/test_read_analysis.py b/tests/store/crud/read/test_read_analysis.py index 4cb6ec3909..e4bd6f0aa7 100644 --- a/tests/store/crud/read/test_read_analysis.py +++ b/tests/store/crud/read/test_read_analysis.py @@ -4,7 +4,7 @@ from sqlalchemy.orm import Query -from cg.constants import Pipeline +from cg.constants import Workflow from cg.constants.constants import CaseActions from cg.constants.subject import PhenotypeStatus from cg.store.models import Analysis, Case, CaseSample, Sample @@ -15,7 +15,7 @@ def test_get_latest_nipt_analysis_to_upload( store_with_analyses_for_cases_not_uploaded_fluffy: Store, timestamp_now: datetime, - pipeline: str = Pipeline.FLUFFY, + pipeline: str = Workflow.FLUFFY, ): """Test get the latest NIPT analysis to upload.""" # GIVEN an analysis that is not delivery reported but there exists a newer analysis @@ -37,7 +37,7 @@ def test_get_latest_nipt_analysis_to_upload( def test_get_latest_microsalt_analysis_to_upload( store_with_analyses_for_cases_not_uploaded_microsalt: Store, timestamp_now: datetime, - pipeline: str = Pipeline.MICROSALT, + pipeline: str = Workflow.MICROSALT, ): """Test get the latest microsalt analysis to upload.""" # GIVEN an analysis that is not delivery reported but there exists a newer analysis @@ -58,7 +58,7 @@ def test_get_latest_microsalt_analysis_to_upload( def test_get_analyses_to_deliver_for_pipeline( store_with_analyses_for_cases_to_deliver: Store, - pipeline: Pipeline = Pipeline.FLUFFY, + pipeline: Workflow = Workflow.FLUFFY, ): # GIVEN a store with multiple analyses to deliver @@ -95,7 +95,7 @@ def test_get_families_with_extended_models( # GIVEN a completed analysis test_analysis: Analysis = helpers.add_analysis( - base_store, completed_at=timestamp_now, pipeline=Pipeline.MIP_DNA + base_store, completed_at=timestamp_now, pipeline=Workflow.MIP_DNA ) # Given an action set to analyze @@ -114,7 +114,7 @@ def test_get_families_with_extended_models( assert cases # THEN analysis should be part of cases attributes - assert case.analyses[0].pipeline == Pipeline.MIP_DNA + assert case.analyses[0].pipeline == Workflow.MIP_DNA def test_get_families_with_extended_models_when_no_case(base_store: Store): @@ -139,7 +139,7 @@ def test_get_cases_with_samples_query( # GIVEN a completed analysis test_analysis: Analysis = helpers.add_analysis( - base_store, completed_at=timestamp_now, pipeline=Pipeline.MIP_DNA + base_store, completed_at=timestamp_now, pipeline=Workflow.MIP_DNA ) # GIVEN a database with a case with one of sequenced samples and completed analysis @@ -165,7 +165,7 @@ def test_that_many_cases_can_have_one_sample_each( ) # WHEN getting cases to analyse - cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA) + cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA) # THEN cases should contain all cases since they are to be analysed assert len(cases) == len(test_cases) @@ -193,7 +193,7 @@ def test_that_cases_can_have_many_samples( base_store.session.add(link) # WHEN getting cases to 
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA)
 
     # THEN cases should be returned
     assert cases
@@ -214,7 +214,7 @@ def test_external_sample_to_re_analyse(
 
     # GIVEN a completed analysis
     test_analysis: Analysis = helpers.add_analysis(
-        base_store, completed_at=timestamp_now, pipeline=Pipeline.MIP_DNA
+        base_store, completed_at=timestamp_now, pipeline=Workflow.MIP_DNA
     )
     assert test_analysis.completed_at
@@ -226,7 +226,7 @@
     base_store.session.add(link)
 
     # WHEN getting cases to analyse
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA)
 
     # THEN cases should be returned
     assert cases
@@ -242,14 +242,14 @@ def test_new_external_case_not_in_result(base_store: Store, helpers: StoreHelper
     test_sample: Sample = helpers.add_sample(base_store, is_external=True, last_sequenced_at=None)
 
     # GIVEN a cancer case
-    test_case: Case = helpers.add_case(base_store, data_analysis=Pipeline.BALSAMIC)
+    test_case: Case = helpers.add_case(base_store, data_analysis=Workflow.BALSAMIC)
 
     # GIVEN a database with a case with one externally sequenced samples for BALSAMIC analysis
     link = base_store.relate_sample(test_case, test_sample, PhenotypeStatus.UNKNOWN)
     base_store.session.add(link)
 
     # WHEN getting cases to analyse
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.BALSAMIC)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.BALSAMIC)
 
     # THEN cases should not contain the test case
     assert test_case not in cases
@@ -264,7 +264,7 @@ def test_case_to_re_analyse(base_store: Store, helpers: StoreHelpers, timestamp_
 
     # GIVEN a completed analysis
     test_analysis: Analysis = helpers.add_analysis(
-        base_store, completed_at=timestamp_now, pipeline=Pipeline.MIP_DNA
+        base_store, completed_at=timestamp_now, pipeline=Workflow.MIP_DNA
     )
 
     # Given an action set to analyze
@@ -275,7 +275,7 @@
     base_store.session.add(link)
 
     # WHEN getting cases to analyse
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA)
 
     # THEN cases should be returned
     assert cases
@@ -304,7 +304,7 @@ def test_all_samples_and_analysis_completed(
     base_store.session.add(link)
 
     # WHEN getting cases to analyse
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA)
 
     # THEN cases should not contain the test case
     assert not cases
@@ -319,14 +319,14 @@ def test_specified_analysis_in_result(
     test_sample: Sample = helpers.add_sample(base_store, last_sequenced_at=timestamp_now)
 
     # GIVEN a cancer case
-    test_case: Case = helpers.add_case(base_store, data_analysis=Pipeline.BALSAMIC)
+    test_case: Case = helpers.add_case(base_store, data_analysis=Workflow.BALSAMIC)
 
     # GIVEN a database with a case with one sequenced samples for BALSAMIC analysis
     link = base_store.relate_sample(test_case, test_sample, PhenotypeStatus.UNKNOWN)
     base_store.session.add(link)
 
     # WHEN getting cases to analyse
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.BALSAMIC)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.BALSAMIC)
 
     # THEN cases should be returned
     assert cases
@@ -345,14 +345,14 @@ def test_exclude_other_pipeline_analysis_from_result(
     test_sample: Sample = helpers.add_sample(base_store, last_sequenced_at=timestamp_now)
 
     # GIVEN a cancer case
-    test_case = helpers.add_case(base_store, data_analysis=Pipeline.BALSAMIC)
+    test_case = helpers.add_case(base_store, data_analysis=Workflow.BALSAMIC)
 
     # GIVEN a database with a case with one sequenced samples for specified analysis
     link = base_store.relate_sample(test_case, test_sample, PhenotypeStatus.UNKNOWN)
     base_store.session.add(link)
 
     # WHEN getting cases to analyse for another pipeline
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA)
 
     # THEN cases should not contain the test case
     assert test_case not in cases
@@ -383,7 +383,7 @@ def test_one_of_two_sequenced_samples(
     base_store.session.add_all([link_1, link_2])
 
     # WHEN getting cases to analyse
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA, threshold=True)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA, threshold=True)
 
     # THEN no cases should be returned
     assert not cases
@@ -407,7 +407,7 @@ def test_one_of_one_sequenced_samples(
     assert test_sample.last_sequenced_at is not None
 
     # WHEN getting cases to analyse
-    cases: list[Case] = base_store.cases_to_analyze(pipeline=Pipeline.MIP_DNA)
+    cases: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA)
 
     # THEN cases should be returned
     assert cases
@@ -419,7 +419,7 @@
 def test_get_analyses_for_case_and_pipeline_before(
     store_with_analyses_for_cases_not_uploaded_fluffy: Store,
     timestamp_now: datetime,
-    pipeline: Pipeline = Pipeline.FLUFFY,
+    pipeline: Workflow = Workflow.FLUFFY,
     case_id: str = "yellowhog",
 ):
     """Test to get all analyses before a given date."""
@@ -466,7 +466,7 @@ def test_get_analyses_for_case_before(
 
 def test_get_analyses_for_pipeline_before(
     store_with_analyses_for_cases_not_uploaded_fluffy: Store,
     timestamp_now: datetime,
-    pipeline: Pipeline = Pipeline.FLUFFY,
+    pipeline: Workflow = Workflow.FLUFFY,
 ):
     """Test to get all analyses for a pipeline before a given date."""
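
For reference, the two call shapes of cases_to_analyze used throughout the file above. The exact semantics of threshold are an assumption; it only appears as a flag in this diff:

from cg.constants import Workflow
from cg.store.models import Case

# All cases that are ready to be analysed with a given workflow.
ready: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA)

# With threshold=True, only cases whose samples all meet the sequencing
# threshold are returned (assumed; compare test_one_of_two_sequenced_samples).
fully_sequenced: list[Case] = base_store.cases_to_analyze(pipeline=Workflow.MIP_DNA, threshold=True)
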
diff --git a/tests/store/crud/read/test_read_case.py b/tests/store/crud/read/test_read_case.py
index 114fac1a69..5af8e24dec 100644
--- a/tests/store/crud/read/test_read_case.py
+++ b/tests/store/crud/read/test_read_case.py
@@ -3,7 +3,7 @@
 from datetime import datetime, timedelta
 
 from cg.constants import DataDelivery, Priority
-from cg.constants.constants import CaseActions, Pipeline
+from cg.constants.constants import CaseActions, Workflow
 from cg.store.models import Analysis, Case, CaseSample
 from cg.store.store import Store
@@ -80,13 +80,13 @@ def test_get_running_cases_in_pipeline(store_with_cases_and_customers: Store):
 
     # WHEN getting cases with a pipeline and are running
     cases: list[Case] = store_with_cases_and_customers.get_running_cases_in_pipeline(
-        pipeline=Pipeline.MIP_DNA
+        pipeline=Workflow.MIP_DNA
     )
 
     # THEN cases with the specified pipeline, and case action is returned
     for case in cases:
         assert case.action == CaseActions.RUNNING
-        assert case.data_analysis == Pipeline.MIP_DNA
+        assert case.data_analysis == Workflow.MIP_DNA
 
 
 def test_delivered_at_affects_tat(base_store: Store, helpers):
@@ -629,7 +629,7 @@ def test_include_case_by_case_uppercase_data_analysis(base_store: Store, helpers
     """Test to that cases can be included by uppercase data_analysis"""
 
     # GIVEN a database with a case with data analysis set
-    data_analysis = Pipeline.BALSAMIC
+    data_analysis = Workflow.BALSAMIC
     new_case = add_case(helpers, base_store, data_analysis=data_analysis)
 
     # WHEN getting active cases by data_analysis
@@ -645,7 +645,7 @@ def test_exclude_case_by_data_analysis(base_store: Store, helpers):
     """Test to that cases can be excluded by data_analysis"""
 
     # GIVEN a database with a case with data analysis set
-    add_case(helpers, base_store, data_analysis=Pipeline.BALSAMIC)
+    add_case(helpers, base_store, data_analysis=Workflow.BALSAMIC)
 
     # WHEN getting active cases by data_analysis
     cases = base_store.cases(data_analysis="dummy_analysis")
@@ -658,7 +658,7 @@ def test_include_case_by_partial_data_analysis(base_store: Store, helpers):
     """Test to that cases can be included by data_analysis"""
 
     # GIVEN a database with a case with data analysis set
-    data_analysis = Pipeline.BALSAMIC
+    data_analysis = Workflow.BALSAMIC
     new_case = add_case(helpers, base_store, data_analysis=data_analysis)
 
     # WHEN getting active cases by partial data_analysis
@@ -674,7 +674,7 @@ def test_show_multiple_data_analysis(base_store: Store, helpers):
     """Test to that cases can be included by data_analysis"""
 
     # GIVEN a database with a case with data analysis set
-    data_analysis = Pipeline.BALSAMIC
+    data_analysis = Workflow.BALSAMIC
     new_case = add_case(helpers, base_store, data_analysis=data_analysis)
     sample1 = helpers.add_sample(base_store)
     link_1: CaseSample = base_store.relate_sample(new_case, sample1, "unknown")
@@ -699,7 +699,7 @@ def test_show_data_analysis(base_store: Store, helpers):
     """Test to that cases can be included by data_analysis"""
 
     # GIVEN a database with a case with data analysis set
-    data_analysis = Pipeline.BALSAMIC
+    data_analysis = Workflow.BALSAMIC
     new_case = add_case(helpers, base_store, data_analysis=data_analysis)
 
     # WHEN getting active cases by data_analysis
@@ -715,7 +715,7 @@ def test_include_case_by_data_analysis(base_store: Store, helpers):
     """Test to that cases can be included by data_analysis"""
 
     # GIVEN a database with a case with data analysis set
-    data_analysis = Pipeline.BALSAMIC
+    data_analysis = Workflow.BALSAMIC
     new_case = add_case(helpers, base_store, data_analysis=data_analysis)
 
     # WHEN getting active cases by data_analysis
@@ -1348,7 +1348,7 @@ def test_analysis_pipeline(base_store: Store, helpers):
     """Test to that cases displays pipeline"""
 
     # GIVEN a database with an analysis that has pipeline
-    pipeline = Pipeline.BALSAMIC
+    pipeline = Workflow.BALSAMIC
     analysis = helpers.add_analysis(base_store, pipeline=pipeline)
     assert analysis.pipeline is not None
@@ -1612,7 +1612,7 @@ def add_case(
     ordered_days_ago=0,
     action=None,
     priority=None,
-    data_analysis=Pipeline.BALSAMIC,
+    data_analysis=Workflow.BALSAMIC,
     data_delivery=DataDelivery.SCOUT,
     ticket="123456",
 ):
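
Taken together, the data_analysis tests above (uppercase, partial, multiple) suggest that Store.cases() matches data_analysis as a case-insensitive substring, which is why a str-valued Workflow member can be passed straight through. A sketch of that assumed behaviour:

from cg.constants import Workflow

# All three calls below are expected to match a case stored with
# Workflow.BALSAMIC, per the include/partial/uppercase tests above (assumed).
hits = base_store.cases(data_analysis=Workflow.BALSAMIC)
hits_uppercase = base_store.cases(data_analysis="BALSAMIC")
hits_partial = base_store.cases(data_analysis="balsa")  # hypothetical partial match

# A non-matching string excludes the case, as in test_exclude_case_by_data_analysis.
misses = base_store.cases(data_analysis="dummy_analysis")
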
diff --git a/tests/store/filters/test_status_analyses_filters.py b/tests/store/filters/test_status_analyses_filters.py
index 372b9d088d..2393b43d15 100644
--- a/tests/store/filters/test_status_analyses_filters.py
+++ b/tests/store/filters/test_status_analyses_filters.py
@@ -2,7 +2,7 @@
 
 from sqlalchemy.orm import Query
 
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.store.filters.status_analysis_filters import (
     filter_analyses_by_case_entry_id,
     filter_analyses_by_started_at,
@@ -56,14 +56,14 @@ def test_filter_analyses_with_pipeline(base_store: Store, helpers: StoreHelpers,
     """Test analyses filtering by pipeline."""
 
     # GIVEN a set of mock analyses
-    balsamic_analysis: Analysis = helpers.add_analysis(store=base_store, pipeline=Pipeline.BALSAMIC)
+    balsamic_analysis: Analysis = helpers.add_analysis(store=base_store, pipeline=Workflow.BALSAMIC)
     mip_analysis: Analysis = helpers.add_analysis(
-        store=base_store, case=case, pipeline=Pipeline.MIP_DNA
+        store=base_store, case=case, pipeline=Workflow.MIP_DNA
     )
 
     # WHEN extracting the analyses
     analyses: Query = filter_analyses_with_pipeline(
-        analyses=base_store._get_query(table=Analysis), pipeline=Pipeline.BALSAMIC
+        analyses=base_store._get_query(table=Analysis), pipeline=Workflow.BALSAMIC
     )
 
     # ASSERT that analyses is a query
@@ -170,14 +170,14 @@ def test_filter_report_analyses_by_pipeline(base_store: Store, helpers: StoreHel
     """Test filtering delivery report related analysis by pipeline."""
 
     # GIVEN a set of mock analysis
-    balsamic_analysis: Analysis = helpers.add_analysis(store=base_store, pipeline=Pipeline.BALSAMIC)
+    balsamic_analysis: Analysis = helpers.add_analysis(store=base_store, pipeline=Workflow.BALSAMIC)
     fluffy_analysis: Analysis = helpers.add_analysis(
-        store=base_store, case=case, pipeline=Pipeline.FLUFFY
+        store=base_store, case=case, pipeline=Workflow.FLUFFY
     )
 
     # WHEN filtering delivery report related analyses
     analyses: Query = filter_report_analyses_by_pipeline(
-        analyses=base_store._get_query(table=Analysis), pipeline=Pipeline.BALSAMIC
+        analyses=base_store._get_query(table=Analysis), pipeline=Workflow.BALSAMIC
     )
 
     # ASSERT that analyses is a query
diff --git a/tests/store/filters/test_status_application_limitations_filters.py b/tests/store/filters/test_status_application_limitations_filters.py
index 4f68f8da27..d1a4b5632a 100644
--- a/tests/store/filters/test_status_application_limitations_filters.py
+++ b/tests/store/filters/test_status_application_limitations_filters.py
@@ -1,6 +1,6 @@
 from sqlalchemy.orm import Query
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.store.filters.status_application_limitations_filters import (
     filter_application_limitations_by_pipeline,
     filter_application_limitations_by_tag,
@@ -36,7 +36,7 @@ def test_filter_application_limitations_by_tag(
 
 def test_filter_application_limitations_by_pipeline(
     store_with_application_limitations: Store,
-    pipeline=Pipeline.BALSAMIC,
+    pipeline=Workflow.BALSAMIC,
 ) -> None:
     """Test to get application limitations by pipeline."""
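
Direct use of the two filter functions exercised above, with signatures exactly as they appear in this diff; the second import is assumed to live in the same module, as the test file implies:

from sqlalchemy.orm import Query

from cg.constants import Workflow
from cg.store.filters.status_analysis_filters import (
    filter_analyses_with_pipeline,
    filter_report_analyses_by_pipeline,
)
from cg.store.models import Analysis

analyses: Query = base_store._get_query(table=Analysis)

# Narrow the analyses to a single workflow; the filter returns a Query.
balsamic_only: Query = filter_analyses_with_pipeline(analyses=analyses, pipeline=Workflow.BALSAMIC)

# Same idea for the delivery-report-related analyses.
balsamic_reports: Query = filter_report_analyses_by_pipeline(analyses=analyses, pipeline=Workflow.BALSAMIC)
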
diff --git a/tests/store/filters/test_status_cases_filters.py b/tests/store/filters/test_status_cases_filters.py
index 8dd110f54a..a9f0628584 100644
--- a/tests/store/filters/test_status_cases_filters.py
+++ b/tests/store/filters/test_status_cases_filters.py
@@ -2,7 +2,7 @@
 
 from sqlalchemy.orm import Query
 
-from cg.constants.constants import CaseActions, DataDelivery, Pipeline
+from cg.constants.constants import CaseActions, DataDelivery, Workflow
 from cg.constants.sequencing import SequencingMethod
 from cg.constants.subject import PhenotypeStatus
 from cg.store.filters.status_case_filters import (
@@ -149,7 +149,7 @@ def test_filter_cases_with_pipeline_when_correct_pipline(
     test_sample: Sample = helpers.add_sample(base_store, last_sequenced_at=timestamp_now)
 
     # GIVEN a cancer case
-    test_case = helpers.add_case(base_store, data_analysis=Pipeline.BALSAMIC)
+    test_case = helpers.add_case(base_store, data_analysis=Workflow.BALSAMIC)
 
     # GIVEN a database with a case with one sequenced samples for specified analysis
     link = base_store.relate_sample(test_case, test_sample, PhenotypeStatus.UNKNOWN)
@@ -159,7 +159,7 @@
     cases: Query = base_store._get_outer_join_cases_with_analyses_query()
 
     # WHEN getting cases to analyse for another pipeline
-    cases: list[Query] = list(filter_cases_with_pipeline(cases=cases, pipeline=Pipeline.BALSAMIC))
+    cases: list[Query] = list(filter_cases_with_pipeline(cases=cases, pipeline=Workflow.BALSAMIC))
 
     # THEN cases should contain the test case
     assert cases
@@ -174,7 +174,7 @@ def test_filter_cases_with_pipeline_when_incorrect_pipline(
     test_sample: Sample = helpers.add_sample(base_store, last_sequenced_at=timestamp_now)
 
     # GIVEN a cancer case
-    test_case: Case = helpers.add_case(base_store, data_analysis=Pipeline.BALSAMIC)
+    test_case: Case = helpers.add_case(base_store, data_analysis=Workflow.BALSAMIC)
 
     # GIVEN a database with a case with one sequenced samples for specified analysis
     link = base_store.relate_sample(test_case, test_sample, PhenotypeStatus.UNKNOWN)
@@ -184,7 +184,7 @@
     cases: Query = base_store._get_outer_join_cases_with_analyses_query()
 
     # WHEN getting cases to analyse for another pipeline
-    cases: list[Query] = list(filter_cases_with_pipeline(cases=cases, pipeline=Pipeline.MIP_DNA))
+    cases: list[Query] = list(filter_cases_with_pipeline(cases=cases, pipeline=Workflow.MIP_DNA))
 
     # THEN cases should not contain the test case
     assert not cases
@@ -199,10 +199,10 @@ def test_filter_cases_with_loqusdb_supported_pipeline(
     test_sample: Sample = helpers.add_sample(base_store, last_sequenced_at=timestamp_now)
 
     # GIVEN a MIP-DNA and a FLUFFY case
-    test_mip_case: Case = helpers.add_case(base_store, data_analysis=Pipeline.MIP_DNA)
+    test_mip_case: Case = helpers.add_case(base_store, data_analysis=Workflow.MIP_DNA)
     test_mip_case.customer.loqus_upload = True
     test_fluffy_case: Case = helpers.add_case(
-        base_store, name="test", data_analysis=Pipeline.FLUFFY
+        base_store, name="test", data_analysis=Workflow.FLUFFY
     )
     test_fluffy_case.customer.loqus_upload = True
@@ -237,7 +237,7 @@ def test_filter_cases_with_loqusdb_supported_sequencing_method(
     )
 
     # GIVEN a MIP-DNA associated test case
-    test_case_wes: Case = helpers.add_case(base_store, data_analysis=Pipeline.MIP_DNA)
+    test_case_wes: Case = helpers.add_case(base_store, data_analysis=Workflow.MIP_DNA)
     link = base_store.relate_sample(test_case_wes, test_sample_wes, PhenotypeStatus.UNKNOWN)
     base_store.session.add(link)
@@ -246,7 +246,7 @@
     # WHEN retrieving the available cases
     cases: Query = filter_cases_with_loqusdb_supported_sequencing_method(
-        cases=cases, pipeline=Pipeline.MIP_DNA
+        cases=cases, pipeline=Workflow.MIP_DNA
     )
 
     # ASSERT that cases is a query
@@ -267,7 +267,7 @@ def test_filter_cases_with_loqusdb_supported_sequencing_method_empty(
     )
 
     # GIVEN a MIP-DNA associated test case
-    test_case_wts: Case = helpers.add_case(base_store, data_analysis=Pipeline.MIP_DNA)
+    test_case_wts: Case = helpers.add_case(base_store, data_analysis=Workflow.MIP_DNA)
     link = base_store.relate_sample(test_case_wts, test_sample_wts, PhenotypeStatus.UNKNOWN)
     base_store.session.add(link)
@@ -276,7 +276,7 @@
     # WHEN retrieving the valid cases
     cases: Query = filter_cases_with_loqusdb_supported_sequencing_method(
-        cases=cases, pipeline=Pipeline.MIP_DNA
+        cases=cases, pipeline=Workflow.MIP_DNA
    )
 
     # ASSERT that cases is a query
@@ -296,7 +296,7 @@ def test_filter_cases_for_analysis(
 
     # GIVEN a completed analysis
     test_analysis: Analysis = helpers.add_analysis(
-        base_store, completed_at=timestamp_now, pipeline=Pipeline.MIP_DNA
+        base_store, completed_at=timestamp_now, pipeline=Workflow.MIP_DNA
     )
 
     # Given an action set to analyze
@@ -363,7 +363,7 @@ def test_filter_cases_for_analysis_when_cases_with_no_action_and_new_sequence_da
     )
 
     # GIVEN a completed analysis
-    test_analysis: Analysis = helpers.add_analysis(base_store, pipeline=Pipeline.MIP_DNA)
+    test_analysis: Analysis = helpers.add_analysis(base_store, pipeline=Workflow.MIP_DNA)
 
     # Given an action set to None
     test_analysis.case.action = None
@@ -399,7 +399,7 @@ def test_filter_cases_for_analysis_when_cases_with_no_action_and_old_sequence_da
     )
 
     # GIVEN a completed analysis
-    test_analysis: Analysis = helpers.add_analysis(base_store, pipeline=Pipeline.MIP_DNA)
+    test_analysis: Analysis = helpers.add_analysis(base_store, pipeline=Workflow.MIP_DNA)
 
     # Given an action set to None
     test_analysis.case.action: str | None = None
diff --git a/tests/store/test_delivery.py b/tests/store/test_delivery.py
index 7ef89a46b0..ba37d185a5 100644
--- a/tests/store/test_delivery.py
+++ b/tests/store/test_delivery.py
@@ -3,7 +3,7 @@
 
 import datetime as dt
 
 from cg.constants import DataDelivery
-from cg.constants.constants import Pipeline
+from cg.constants.constants import Workflow
 from cg.store.models import Case, Sample
@@ -16,7 +16,7 @@ def test_get_delivery_arguments(case: Case):
     delivery_types: set[str] = case.get_delivery_arguments()
 
     # THEN the correct delivery types should be returned
-    assert delivery_types == {Pipeline.MIP_DNA, Pipeline.FASTQ}
+    assert delivery_types == {Workflow.MIP_DNA, Workflow.FASTQ}
 
 
 def test_list_samples_to_deliver(base_store, helpers):
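
The test_delivery.py hunk also pins down what Case.get_delivery_arguments() returns for a case ordered with MIP-DNA analysis plus FASTQ delivery; restated, with the plain-string check resting on the assumption that Workflow members are str-valued:

from cg.constants import Workflow

# Per test_get_delivery_arguments above: the delivery arguments are Workflow members.
delivery_types: set[str] = case.get_delivery_arguments()
assert delivery_types == {Workflow.MIP_DNA, Workflow.FASTQ}

# Because the members subclass str, plain string membership keeps working
# (assumes Workflow.FASTQ == "fastq").
assert "fastq" in delivery_types
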
diff --git a/tests/store_helpers.py b/tests/store_helpers.py
index 60c097ec59..392751baf7 100644
--- a/tests/store_helpers.py
+++ b/tests/store_helpers.py
@@ -7,7 +7,7 @@
 from housekeeper.store.models import Bundle, Version
 
 from cg.apps.housekeeper.hk import HousekeeperAPI
-from cg.constants import DataDelivery, Pipeline
+from cg.constants import DataDelivery, Workflow
 from cg.constants.pedigree import Pedigree
 from cg.constants.priority import PriorityTerms
 from cg.constants.sequencing import Sequencers
@@ -241,7 +241,7 @@ def add_application(
 def ensure_application_limitation(
     store: Store,
     application: Application,
-    pipeline: str = Pipeline.MIP_DNA,
+    pipeline: str = Workflow.MIP_DNA,
     limitations: str = "Dummy limitations",
     **kwargs,
 ) -> ApplicationLimitations:
@@ -319,7 +319,7 @@ def add_analysis(
     upload_started: datetime = None,
     delivery_reported_at: datetime = None,
     cleaned_at: datetime = None,
-    pipeline: Pipeline = Pipeline.BALSAMIC,
+    pipeline: Workflow = Workflow.BALSAMIC,
     pipeline_version: str = "1.0",
     data_delivery: DataDelivery = DataDelivery.FASTQ_QC,
     uploading: bool = False,
@@ -438,7 +438,7 @@ def ensure_panel(
 def add_case(
     store: Store,
     name: str = "case_test",
-    data_analysis: str = Pipeline.MIP_DNA,
+    data_analysis: str = Workflow.MIP_DNA,
     data_delivery: DataDelivery = DataDelivery.SCOUT,
     action: str = None,
     internal_id: str = None,
@@ -487,7 +487,7 @@ def add_order(
     customer_id: int,
     ticket_id: int,
     order_date: datetime = datetime(year=2023, month=12, day=24),
-    workflow: Pipeline = Pipeline.MIP_DNA,
+    workflow: Workflow = Workflow.MIP_DNA,
 ) -> Order:
     order = Order(
         customer_id=customer_id, ticket_id=ticket_id, order_date=order_date, workflow=workflow
@@ -502,7 +502,7 @@ def ensure_case(
     case_name: str = "test-case",
     case_id: str = "blueeagle",
     customer: Customer = None,
-    data_analysis: Pipeline = Pipeline.MIP_DNA,
+    data_analysis: Workflow = Workflow.MIP_DNA,
     data_delivery: DataDelivery = DataDelivery.SCOUT,
     action: str = None,
 ):
@@ -541,7 +541,7 @@ def ensure_case_from_dict(
         panels=case_info["panels"],
         internal_id=case_info["internal_id"],
         ordered_at=ordered_at,
-        data_analysis=case_info.get("data_analysis", str(Pipeline.MIP_DNA)),
+        data_analysis=case_info.get("data_analysis", Workflow.MIP_DNA),
         data_delivery=case_info.get("data_delivery", str(DataDelivery.SCOUT)),
         created_at=created_at,
         action=case_info.get("action"),
@@ -583,7 +583,7 @@ def ensure_case_from_dict(
         StoreHelpers.add_analysis(
             store,
-            pipeline=Pipeline.MIP_DNA,
+            pipeline=Workflow.MIP_DNA,
             case=case,
             completed_at=completed_at or datetime.now(),
             started_at=started_at or datetime.now(),
@@ -648,7 +648,7 @@ def add_microbial_sample(
         store=store,
         case_name=str(ticket),
         customer=customer,
-        data_analysis=Pipeline.MICROSALT,
+        data_analysis=Workflow.MICROSALT,
         data_delivery=DataDelivery.FASTQ_QC,
     )
     StoreHelpers.add_relationship(store=store, case=case, sample=sample)
diff --git a/tests/utils/test_dispatcher.py b/tests/utils/test_dispatcher.py
index 46c3b4ceb6..1839146380 100644
--- a/tests/utils/test_dispatcher.py
+++ b/tests/utils/test_dispatcher.py
@@ -2,7 +2,7 @@
 
 import pytest
 
-from cg.constants import Pipeline
+from cg.constants import Workflow
 from cg.constants.constants import CustomerId
 from cg.store.models import Analysis, Sample
 from cg.store.store import Store
@@ -169,7 +169,7 @@ def test_dispatcher_on_other_functions(
     helpers: StoreHelpers,
     timestamp_now: datetime,
     timestamp_yesterday: datetime,
-    pipeline: Pipeline = Pipeline.MIP_DNA,
+    pipeline: Workflow = Workflow.MIP_DNA,
     case_internal_id: str = "test_case",
 ):
     """Test that the dispatcher can be used to call functions in the status db"""
 
     # GIVEN a database with a case and an analysis
     case = helpers.add_case(store, internal_id=case_internal_id)
     helpers.add_analysis(store, case=case, started_at=timestamp_yesterday, pipeline=pipeline)
-    helpers.add_analysis(store, case=case, started_at=timestamp_now, pipeline=Pipeline.FLUFFY)
-    helpers.add_analysis(store, case=case, started_at=timestamp_yesterday, pipeline=Pipeline.FLUFFY)
+    helpers.add_analysis(store, case=case, started_at=timestamp_now, pipeline=Workflow.FLUFFY)
+    helpers.add_analysis(store, case=case, started_at=timestamp_yesterday, pipeline=Workflow.FLUFFY)
 
     # WHEN calling the dispatcher with the to get analyses
     function_dispatcher: Dispatcher = Dispatcher(