diff --git a/requirements.txt b/requirements.txt
index 865c6b7..9e0392c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,5 @@
+numpy <2.0  # needed for older Torch
+torch <2.3  # FIXME: freeze until AMP compatibility is resolved
 lightning >=2.0.0
 hivemind >=1.1.0, <=1.1.5; sys_platform == 'linux'
diff --git a/src/lightning_hivemind/strategy.py b/src/lightning_hivemind/strategy.py
index fe6b809..07c1317 100644
--- a/src/lightning_hivemind/strategy.py
+++ b/src/lightning_hivemind/strategy.py
@@ -277,7 +277,7 @@ def _wrap_schedulers(self, opt: "hivemind.Optimizer") -> None:
                 raise ValueError(
                     f"The `ReduceLROnPlateau` scheduler is not currently supported with `{self.__class__.__name__}`."
                 )
-            scheduler_config.scheduler = HiveMindScheduler(optimizer=opt, scheduler=scheduler)
+            scheduler_config.scheduler = HiveMindScheduler(optimizer=opt, scheduler=scheduler)  # type: ignore[assignment]
 
     def on_train_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None:
         if not self._hivemind_initialized:
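
Note on the `# type: ignore[assignment]` suppression: the diff itself only shows that mypy flags this assignment. A likely reason (an assumption, not confirmed by the patch) is that the scheduler field of the config is annotated with the torch scheduler type, while `HiveMindScheduler` wraps a scheduler rather than subclassing that type. The minimal, self-contained sketch below reproduces that pattern with stand-in classes (`BaseScheduler`, `SchedulerConfig`, `WrappingScheduler` are hypothetical names, not the library's API):

    # Minimal sketch, assuming the config field is typed with the base scheduler
    # class and the wrapper does not inherit from it.
    from dataclasses import dataclass


    class BaseScheduler:
        """Stand-in for the scheduler type the config field is annotated with."""

        def step(self) -> None:
            pass


    @dataclass
    class SchedulerConfig:
        scheduler: BaseScheduler  # field is annotated with the base type


    class WrappingScheduler:
        """Stand-in for `HiveMindScheduler`: wraps a scheduler instead of subclassing it."""

        def __init__(self, scheduler: BaseScheduler) -> None:
            self._scheduler = scheduler

        def step(self) -> None:
            self._scheduler.step()


    config = SchedulerConfig(scheduler=BaseScheduler())
    # mypy reports "Incompatible types in assignment" here, hence the suppression
    config.scheduler = WrappingScheduler(config.scheduler)  # type: ignore[assignment]

The code runs fine at runtime because the wrapper is duck-type compatible; the suppression only silences the static checker, mirroring what the patched line in `strategy.py` does.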