@@ -2,7 +2,7 @@
 from pathlib import Path
 from typing import Callable, Optional, Type
 
-from housekeeper.store.models import File
+from housekeeper.store.models import Archive, File
 from pydantic import BaseModel, ConfigDict
 
 from cg.apps.housekeeper.hk import HousekeeperAPI
@@ -16,6 +16,9 @@
 
 LOG = logging.getLogger(__name__)
 DEFAULT_SPRING_ARCHIVE_COUNT = 200
+ARCHIVE_HANDLERS: dict[str, Type[ArchiveHandler]] = {
+    ArchiveLocations.KAROLINSKA_BUCKET: DDNDataFlowClient
+}
 
 
 class ArchiveModels(BaseModel):
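The `ARCHIVE_HANDLERS` mapping now lives at module level and acts as a registry from archive location to handler class; `update_ongoing_task` further down looks the class up and instantiates it with the data-flow config. A minimal sketch of that lookup-then-instantiate dispatch, using hypothetical stand-in names (`FakeFlowClient`, `handlers`) rather than the real `DDNDataFlowClient` or its config:

```python
from typing import Type


class FakeFlowClient:
    """Hypothetical stand-in for an ArchiveHandler implementation (sketch only)."""

    def __init__(self, config: dict) -> None:
        self.config = config

    def is_job_done(self, task_id: int) -> bool:
        # Placeholder: a real handler would query the archiving service.
        return task_id == 42


# Registry keyed by location, holding classes (not instances).
handlers: dict[str, Type[FakeFlowClient]] = {"karolinska_bucket": FakeFlowClient}

# Look up the class for a location, then instantiate it per task.
handler = handlers["karolinska_bucket"]({"endpoint": "https://archive.example"})
print(handler.is_job_done(42))  # True in this sketch
```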
@@ -53,11 +56,6 @@ def filter_samples_on_archive_location(
 ]
 
 
-ARCHIVE_HANDLERS: dict[str, Type[ArchiveHandler]] = {
-    ArchiveLocations.KAROLINSKA_BUCKET: DDNDataFlowClient
-}
-
-
 class SpringArchiveAPI:
     """Class handling the archiving of sample SPRING files to an off-premise location for long
     term storage."""
@@ -188,7 +186,125 @@ def add_samples_to_files(self, files_to_archive: list[File]) -> list[FileAndSamp
         adds it to the list which is returned."""
         files_and_samples: list[FileAndSample] = []
         for file in files_to_archive:
-            sample: Optional[Sample] = self.get_sample(file)
-            if sample:
+            if sample := self.get_sample(file):
                 files_and_samples.append(FileAndSample(file=file, sample=sample))
         return files_and_samples
+
+    def update_status_for_ongoing_tasks(self) -> None:
+        """Updates any completed jobs with a finished timestamp."""
+        self.update_ongoing_archivals()
+        self.update_ongoing_retrievals()
+
+    def update_ongoing_archivals(self) -> None:
+        ongoing_archivals: list[Archive] = self.housekeeper_api.get_ongoing_archivals()
+        archival_ids_per_location: dict[
+            ArchiveLocations, list[int]
+        ] = self.sort_archival_ids_on_archive_location(ongoing_archivals)
+        for archive_location in ArchiveLocations:
+            self.update_archival_jobs_for_archive_location(
+                archive_location=archive_location,
+                job_ids=archival_ids_per_location.get(archive_location),
+            )
+
+    def update_ongoing_retrievals(self) -> None:
+        ongoing_retrievals: list[Archive] = self.housekeeper_api.get_ongoing_retrievals()
+        retrieval_ids_per_location: dict[
+            ArchiveLocations, list[int]
+        ] = self.sort_retrieval_ids_on_archive_location(ongoing_retrievals)
+        for archive_location in ArchiveLocations:
+            self.update_retrieval_jobs_for_archive_location(
+                archive_location=archive_location,
+                job_ids=retrieval_ids_per_location.get(archive_location),
+            )
+
+    def update_archival_jobs_for_archive_location(
+        self, archive_location: ArchiveLocations, job_ids: list[int]
+    ) -> None:
+        for job_id in job_ids:
+            self.update_ongoing_task(
+                task_id=job_id, archive_location=archive_location, is_archival=True
+            )
+
+    def update_retrieval_jobs_for_archive_location(
+        self, archive_location: ArchiveLocations, job_ids: list[int]
+    ) -> None:
+        for job_id in job_ids:
+            self.update_ongoing_task(
+                task_id=job_id, archive_location=archive_location, is_archival=False
+            )
+
+    def update_ongoing_task(
+        self, task_id: int, archive_location: ArchiveLocations, is_archival: bool
+    ) -> None:
+        """Fetches info on an ongoing job and updates the Archive entry in Housekeeper."""
+        archive_handler: ArchiveHandler = ARCHIVE_HANDLERS[archive_location](self.data_flow_config)
+        is_job_done: bool = archive_handler.is_job_done(task_id)
+        if is_job_done:
+            LOG.info(f"Job with id {task_id} has finished, updating Archive entries.")
+            if is_archival:
+                self.housekeeper_api.set_archived_at(task_id)
+            else:
+                self.housekeeper_api.set_retrieved_at(task_id)
+        else:
+            LOG.info(f"Job with id {task_id} has not yet finished.")
+
+    def sort_archival_ids_on_archive_location(
+        self, archive_entries: list[Archive]
+    ) -> dict[ArchiveLocations, list[int]]:
+        """Returns a dictionary with keys being ArchiveLocations and the values being the subset of the given
+        archival jobs which should be archived there."""
+
+        jobs_per_location: dict[ArchiveLocations, list[int]] = {}
+        jobs_and_locations: set[
+            tuple[int, ArchiveLocations]
+        ] = self.get_unique_archival_ids_and_their_archive_location(archive_entries)
+
+        for archive_location in ArchiveLocations:
+            jobs_per_location[ArchiveLocations(archive_location)] = [
+                job_and_location[0]
+                for job_and_location in jobs_and_locations
+                if job_and_location[1] == archive_location
+            ]
+        return jobs_per_location
+
+    def get_unique_archival_ids_and_their_archive_location(
+        self, archive_entries: list[Archive]
+    ) -> set[tuple[int, ArchiveLocations]]:
+        return set(
+            [
+                (archive.archiving_task_id, self.get_archive_location_from_file(archive.file))
+                for archive in archive_entries
+            ]
+        )
+
+    def sort_retrieval_ids_on_archive_location(
+        self, archive_entries: list[Archive]
+    ) -> dict[ArchiveLocations, list[int]]:
+        """Returns a dictionary with keys being ArchiveLocations and the values being the subset of the given
+        retrieval jobs which should be archived there."""
+        jobs_per_location: dict[ArchiveLocations, list[int]] = {}
+        jobs_and_locations: set[
+            tuple[int, ArchiveLocations]
+        ] = self.get_unique_retrieval_ids_and_their_archive_location(archive_entries)
+        for archive_location in ArchiveLocations:
+            jobs_per_location[ArchiveLocations(archive_location)] = [
+                job_and_location[0]
+                for job_and_location in jobs_and_locations
+                if job_and_location[1] == archive_location
+            ]
+        return jobs_per_location
+
+    def get_unique_retrieval_ids_and_their_archive_location(
+        self, archive_entries: list[Archive]
+    ) -> set[tuple[int, ArchiveLocations]]:
+        return set(
+            [
+                (archive.retrieval_task_id, self.get_archive_location_from_file(archive.file))
+                for archive in archive_entries
+            ]
+        )
+
+    def get_archive_location_from_file(self, file: File) -> ArchiveLocations:
+        return ArchiveLocations(
+            self.status_db.get_sample_by_internal_id(file.version.bundle.name).archive_location
+        )
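For readers skimming the new helpers: `sort_archival_ids_on_archive_location` and `sort_retrieval_ids_on_archive_location` both de-duplicate (task id, location) pairs and then bucket the task ids per location. A self-contained sketch of that grouping, using a stand-in enum and plain tuples instead of the real `ArchiveLocations` and Housekeeper `Archive` rows:

```python
from enum import Enum


class Location(str, Enum):
    """Stand-in for ArchiveLocations; for illustration only."""

    KAROLINSKA_BUCKET = "karolinska_bucket"
    OTHER = "other"


def group_task_ids_by_location(
    entries: list[tuple[int, Location]]
) -> dict[Location, list[int]]:
    """Bucket unique (task_id, location) pairs per location, mirroring the
    dict[location, list[task_id]] shape the new sort_* helpers return."""
    unique_pairs = set(entries)  # duplicates collapse, as with the helpers' set(...)
    return {
        location: [task_id for task_id, task_location in unique_pairs if task_location == location]
        for location in Location
    }


grouped = group_task_ids_by_location(
    [(101, Location.KAROLINSKA_BUCKET), (101, Location.KAROLINSKA_BUCKET), (202, Location.OTHER)]
)
print(grouped)  # task 101 ends up under KAROLINSKA_BUCKET, task 202 under OTHER
```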