diff --git a/fedot/api/api_utils/api_composer.py b/fedot/api/api_utils/api_composer.py
index cc3bae010d..44bb6fc7bb 100644
--- a/fedot/api/api_utils/api_composer.py
+++ b/fedot/api/api_utils/api_composer.py
@@ -102,7 +102,7 @@ def propose_and_fit_initial_assumption(self, train_data: InputData) -> Tuple[Seq
                                                use_input_preprocessing=self.params.get(
                                                    'use_input_preprocessing'))

-        with self.timer.launch_assumption_fit():
+        with self.timer.launch_assumption_fit(n_folds=self.params.data['cv_folds']):
             fitted_assumption = \
                 assumption_handler.fit_assumption_and_check_correctness(deepcopy(initial_assumption[0]),
                                                                         pipelines_cache=self.pipelines_cache,
diff --git a/fedot/api/time.py b/fedot/api/time.py
index 77ac4d80f8..66fac7a1d3 100644
--- a/fedot/api/time.py
+++ b/fedot/api/time.py
@@ -24,6 +24,7 @@ def __init__(self, **time_params):
         self.tuning_spend_time = datetime.timedelta(minutes=0)
         self.assumption_fit_spend_time = datetime.timedelta(minutes=0)
+        self.assumption_fit_spend_time_single_fold = datetime.timedelta(minutes=0)

     def __define_timeouts_for_stages(self):
         """ Determine timeouts for tuning and composing """
@@ -69,11 +70,15 @@ def launch_tuning(self):
         self.tuning_spend_time = datetime.datetime.now() - starting_time_for_tuning

     @contextmanager
-    def launch_assumption_fit(self):
+    def launch_assumption_fit(self, n_folds: int):
         """ Wrap assumption fit process with timer """
         starting_time_for_assumption_fit = datetime.datetime.now()
         yield
-        self.assumption_fit_spend_time = datetime.datetime.now() - starting_time_for_assumption_fit
+        self.assumption_fit_spend_time_single_fold = \
+            (datetime.datetime.now() - starting_time_for_assumption_fit)
+        if n_folds is None:
+            n_folds = 1
+        self.assumption_fit_spend_time = self.assumption_fit_spend_time_single_fold * n_folds

     def determine_resources_for_tuning(self):
         """
@@ -81,7 +86,7 @@ def determine_resources_for_tuning(self):
         how much time and how many iterations are needed for tuning
         """
-        all_spend_time = self.composing_spend_time + self.assumption_fit_spend_time
+        all_spend_time = self.composing_spend_time + self.assumption_fit_spend_time_single_fold

         if self.time_for_automl is not None:
             all_timeout = float(self.time_for_automl)
diff --git a/test/integration/api/test_api_utils.py b/test/integration/api/test_api_utils.py
index ac23a25339..1909c4c221 100644
--- a/test/integration/api/test_api_utils.py
+++ b/test/integration/api/test_api_utils.py
@@ -51,8 +51,8 @@ def test_predefined_initial_assumption():
     available_operations = ['bernb', 'dt', 'knn', 'lda', 'qda', 'logit', 'rf', 'svc',
                             'scaling', 'normalization', 'pca', 'kernel_pca']

-    model = Fedot(problem='classification', timeout=.1,
-                  logging_level=logging.DEBUG, available_operations=available_operations,
+    model = Fedot(problem='classification', timeout=1.0,
+                  logging_level=logging.ERROR, available_operations=available_operations,
                   initial_assumption=initial_pipelines)
     old_params = deepcopy(model.params)
     model.fit(train_input)
diff --git a/test/integration/api/test_main_api.py b/test/integration/api/test_main_api.py
index 01eb7800f3..43068b5f23 100644
--- a/test/integration/api/test_main_api.py
+++ b/test/integration/api/test_main_api.py
@@ -36,10 +36,10 @@ def test_api_predict_correct(task_type, metric_name):
     train_data, test_data, _ = get_dataset(task_type)
     changed_api_params = {
         **TESTS_MAIN_API_DEFAULT_PARAMS,
-        'timeout': 1,
+        'timeout': 2,
         'preset': 'fast_train'
     }
-    model = Fedot(problem=task_type, metric=metric_name, **changed_api_params)
+    model = Fedot(problem=task_type, metric=metric_name, **changed_api_params, cv_folds=2)
     fedot_model = model.fit(features=train_data)
     prediction = model.predict(features=test_data)
     metric = model.get_metrics(metric_names=metric_name, rounding_order=5)
diff --git a/test/unit/optimizer/test_external.py b/test/unit/optimizer/test_external.py
index 00b83ade23..d552ca8f35 100644
--- a/test/unit/optimizer/test_external.py
+++ b/test/unit/optimizer/test_external.py
@@ -45,11 +45,11 @@ def test_external_static_optimizer(data_fixture, request):
     data = request.getfixturevalue(data_fixture)
     train_data, test_data = train_test_data_setup(data=data)

-    automl = Fedot(problem='classification', timeout=0.2, logging_level=logging.DEBUG,
+    automl = Fedot(problem='classification', timeout=0.1, logging_level=logging.DEBUG,
                    preset='fast_train', with_tuning=False,
                    optimizer=partial(StaticOptimizer, node_name='logit'),
-                   pop_size=2)
+                   pop_size=2, cv_folds=None)

     obtained_pipeline = automl.fit(train_data)
     automl.predict(test_data)
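Note on the timing change above: a minimal standalone sketch of the pattern the diff introduces, using a hypothetical FoldAwareTimer class rather than FEDOT's actual ApiTime. A context manager records the time spent fitting the initial assumption on one fold, then multiplies it by the number of CV folds to estimate the full assumption-fit budget; n_folds=None (cross-validation disabled) is treated as a single fold.

    # Minimal sketch, assuming a simplified stand-in for FEDOT's ApiTime (hypothetical names).
    import datetime
    from contextlib import contextmanager


    class FoldAwareTimer:
        """Times a single assumption fit and extrapolates it across CV folds."""

        def __init__(self):
            self.assumption_fit_spend_time_single_fold = datetime.timedelta()
            self.assumption_fit_spend_time = datetime.timedelta()

        @contextmanager
        def launch_assumption_fit(self, n_folds):
            start = datetime.datetime.now()
            yield
            # Time actually spent fitting the assumption on one fold
            self.assumption_fit_spend_time_single_fold = datetime.datetime.now() - start
            # n_folds may be None when cross-validation is disabled; count it as one fold
            self.assumption_fit_spend_time = self.assumption_fit_spend_time_single_fold * (n_folds or 1)


    timer = FoldAwareTimer()
    with timer.launch_assumption_fit(n_folds=5):
        pass  # fit the initial assumption here

Keeping the single-fold measurement separate is what lets determine_resources_for_tuning budget tuning time against one fit rather than the extrapolated multi-fold total.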